Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 13
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 22
-rw-r--r--  deps/v8/src/compiler/access-info.h | 6
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 136
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 547
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 31
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 234
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 44
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 2
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc | 2
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 135
-rw-r--r--  deps/v8/src/compiler/basic-block-instrumentor.cc | 22
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 26
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 55
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 34
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 32
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 7
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 4
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.h | 6
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 2
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 249
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 28
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.cc | 4
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.h | 6
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 501
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 2
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 3
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 22
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 10
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.h | 1
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 208
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 711
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 35
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 384
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 114
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 2
-rw-r--r--  deps/v8/src/compiler/instruction.h | 2
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 136
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h | 3
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 689
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 7
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 5
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 287
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 28
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 19
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 1410
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 250
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 85
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.h | 38
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 2
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 78
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 12
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 80
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 7
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 46
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 35
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 60
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 6
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 63
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 1
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.cc | 3
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 5
-rw-r--r--  deps/v8/src/compiler/machine-graph.cc | 1
-rw-r--r--  deps/v8/src/compiler/machine-graph.h | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 377
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 70
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/mips/OWNERS | 5
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 17
-rw-r--r--  deps/v8/src/compiler/mips64/OWNERS | 5
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 20
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 2
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 2
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 221
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 22
-rw-r--r--  deps/v8/src/compiler/operation-typer.h | 5
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 80
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 2
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 40
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 4
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc | 2
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 19
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 3
-rw-r--r--  deps/v8/src/compiler/property-access-builder.h | 6
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 8
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc | 1
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 20
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 96
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 4
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 66
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 10
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 2
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 6
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 53
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 30
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.cc | 2
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 6
-rw-r--r--  deps/v8/src/compiler/typed-optimization.h | 6
-rw-r--r--  deps/v8/src/compiler/typer.cc | 323
-rw-r--r--  deps/v8/src/compiler/typer.h | 6
-rw-r--r--  deps/v8/src/compiler/types.cc | 38
-rw-r--r--  deps/v8/src/compiler/types.h | 9
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 18
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 830
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 59
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 10
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 2
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 2
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 92
114 files changed, 5918 insertions, 3701 deletions
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 0b78795e00..0342a9c950 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -12,6 +12,7 @@
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/js-collection.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module.h"
namespace v8 {
@@ -733,13 +734,11 @@ FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
// static
FieldAccess AccessBuilder::ForJSArrayIteratorIteratedObject() {
- FieldAccess access = {kTaggedBase,
- JSArrayIterator::kIteratedObjectOffset,
- Handle<Name>(),
- MaybeHandle<Map>(),
- Type::ReceiverOrUndefined(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayIterator::kIteratedObjectOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Receiver(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 62ed7e7d85..0b7d1a18a1 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -237,7 +237,7 @@ Handle<Cell> PropertyAccessInfo::export_cell() const {
return Handle<Cell>::cast(constant_);
}
-AccessInfoFactory::AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+AccessInfoFactory::AccessInfoFactory(JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
: js_heap_broker_(js_heap_broker),
@@ -399,9 +399,9 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
dependencies()->DependOnFieldType(MapRef(js_heap_broker(), map),
number);
// Remember the field map, and try to infer a useful type.
- field_type = Type::For(js_heap_broker(),
- descriptors_field_type->AsClass());
- field_map = descriptors_field_type->AsClass();
+ Handle<Map> map(descriptors_field_type->AsClass(), isolate());
+ field_type = Type::For(js_heap_broker(), map);
+ field_map = MaybeHandle<Map>(map);
}
}
*access_info = PropertyAccessInfo::DataField(
@@ -428,10 +428,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK(map->is_prototype_map());
Handle<PrototypeInfo> proto_info =
Map::GetOrCreatePrototypeInfo(map, isolate());
- DCHECK(proto_info->weak_cell()->IsWeakCell());
Handle<JSModuleNamespace> module_namespace(
- JSModuleNamespace::cast(
- WeakCell::cast(proto_info->weak_cell())->value()),
+ JSModuleNamespace::cast(proto_info->module_namespace()),
isolate());
Handle<Cell> cell(
Cell::cast(module_namespace->module()->exports()->Lookup(
@@ -583,9 +581,9 @@ namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
ElementsKind that_kind) {
- if (IsHoleyOrDictionaryElementsKind(this_kind)) {
+ if (IsHoleyElementsKind(this_kind)) {
that_kind = GetHoleyElementsKind(that_kind);
- } else if (IsHoleyOrDictionaryElementsKind(that_kind)) {
+ } else if (IsHoleyElementsKind(that_kind)) {
this_kind = GetHoleyElementsKind(this_kind);
}
if (this_kind == that_kind) return Just(this_kind);
@@ -703,9 +701,9 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
dependencies()->DependOnFieldType(
MapRef(js_heap_broker(), transition_map), number);
// Remember the field map, and try to infer a useful type.
- field_type =
- Type::For(js_heap_broker(), descriptors_field_type->AsClass());
- field_map = descriptors_field_type->AsClass();
+ Handle<Map> map(descriptors_field_type->AsClass(), isolate());
+ field_type = Type::For(js_heap_broker(), map);
+ field_map = MaybeHandle<Map>(map);
}
}
dependencies()->DependOnTransition(MapRef(js_heap_broker(), transition_map));
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index fa737ce0c4..e9890bbb7a 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -140,7 +140,7 @@ class PropertyAccessInfo final {
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
- AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+ AccessInfoFactory(JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone);
@@ -169,13 +169,13 @@ class AccessInfoFactory final {
PropertyAccessInfo* access_info);
CompilationDependencies* dependencies() const { return dependencies_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Factory* factory() const;
Isolate* isolate() const { return isolate_; }
Handle<Context> native_context() const { return native_context_; }
Zone* zone() const { return zone_; }
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
CompilationDependencies* const dependencies_;
Handle<Context> const native_context_;
Isolate* const isolate_;
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index d129274863..8e1c1ab8f4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -14,6 +14,7 @@
#include "src/double.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -415,6 +416,48 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
+ __ instr1(i.TempRegister(1), i.OutputRegister(0), i.InputRegister(0), \
+ SBit::SetCC); \
+ __ instr2(i.TempRegister(2), i.OutputRegister(1), \
+ Operand(i.InputRegister(1))); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
+ __ instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(0))); \
+ __ instr(i.TempRegister(2), i.OutputRegister(1), \
+ Operand(i.InputRegister(1))); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op) \
+ if (arch_opcode == kArmWord64AtomicNarrow##op) { \
+ __ mov(i.OutputRegister(1), Operand(0)); \
+ }
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -1148,6 +1191,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmRev:
+ __ rev(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmClz:
__ clz(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -2623,7 +2670,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
-
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
@@ -2638,17 +2684,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint8:
+ case kArmWord64AtomicNarrowExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint8);
break;
case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint16:
+ case kArmWord64AtomicNarrowExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint16);
break;
case kWord32AtomicExchangeWord32:
+ case kArmWord64AtomicNarrowExchangeUint32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint32);
break;
case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2658,10 +2710,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint8:
+ case kArmWord64AtomicNarrowCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint8);
break;
case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2671,15 +2725,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint16:
+ case kArmWord64AtomicNarrowCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint16);
break;
case kWord32AtomicCompareExchangeWord32:
+ case kArmWord64AtomicNarrowCompareExchangeUint32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint32);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: \
@@ -2687,17 +2745,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint8: \
+ case kArmWord64AtomicNarrow##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint8); \
break; \
case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint16: \
+ case kArmWord64AtomicNarrow##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint16); \
break; \
case kWord32Atomic##op##Word32: \
+ case kArmWord64AtomicNarrow##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint32); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2705,11 +2769,81 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
+ case kArmWord32AtomicPairLoad:
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ dmb(ISH);
+ break;
+ case kArmWord32AtomicPairStore: {
+ Label store;
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ dmb(ISH);
+ __ bind(&store);
+ __ ldrexd(i.TempRegister(1), i.TempRegister(2), i.TempRegister(0));
+ __ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &store);
+ __ dmb(ISH);
+ break;
+ }
+#define ATOMIC_ARITH_BINOP_CASE(op, instr1, instr2) \
+ case kArmWord32AtomicPair##op: { \
+ ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2); \
+ break; \
+ }
+ ATOMIC_ARITH_BINOP_CASE(Add, add, adc)
+ ATOMIC_ARITH_BINOP_CASE(Sub, sub, sbc)
+#undef ATOMIC_ARITH_BINOP_CASE
+#define ATOMIC_LOGIC_BINOP_CASE(op, instr) \
+ case kArmWord32AtomicPair##op: { \
+ ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
+ break; \
+ }
+ ATOMIC_LOGIC_BINOP_CASE(And, and_)
+ ATOMIC_LOGIC_BINOP_CASE(Or, orr)
+ ATOMIC_LOGIC_BINOP_CASE(Xor, eor)
+ case kArmWord32AtomicPairExchange: {
+ Label exchange;
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3));
+ __ dmb(ISH);
+ __ bind(&exchange);
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ strexd(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &exchange);
+ __ dmb(ISH);
+ break;
+ }
+ case kArmWord32AtomicPairCompareExchange: {
+ __ add(i.TempRegister(0), i.InputRegister(4), i.InputRegister(5));
+ Label compareExchange;
+ Label exit;
+ __ dmb(ISH);
+ __ bind(&compareExchange);
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ teq(i.InputRegister(0), Operand(i.OutputRegister(0)));
+ __ b(ne, &exit);
+ __ teq(i.InputRegister(1), Operand(i.OutputRegister(1)));
+ __ b(ne, &exit);
+ __ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &compareExchange);
+ __ bind(&exit);
+ __ dmb(ISH);
+ break;
+ }
+#undef ATOMIC_LOGIC_BINOP_CASE
+#undef ATOMIC_NARROW_OP_CLEAR_HIGH_WORD
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC64_ARITH_BINOP
+#undef ASSEMBLE_ATOMIC64_LOGIC_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_NEON_NARROWING_OP
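Note: the new pair-atomic cases above all share one shape: ldrexd load-exclusives the 64-bit pair, the new low/high words are computed (add/adc, sub/sbc, or a logical op on both halves), strexd attempts the store, and the loop retries if another core wrote in between, with dmb ISH barriers on entry and exit. A rough C++ model of those semantics, using std::atomic's compare-exchange as a stand-in for the exclusive-monitor loop (the function name and signature below are illustrative, not part of the patch):

#include <atomic>
#include <cstdint>
#include <utility>

// Models kArmWord32AtomicPairAdd: the 64-bit operand arrives as two 32-bit
// words (the add/adc pair in ASSEMBLE_ATOMIC64_ARITH_BINOP), and the previous
// value is returned through the two output registers.
std::pair<uint32_t, uint32_t> AtomicPairAdd(std::atomic<uint64_t>* addr,
                                            uint32_t value_low,
                                            uint32_t value_high) {
  uint64_t expected = addr->load();
  uint64_t desired;
  do {
    uint32_t old_low = static_cast<uint32_t>(expected);
    uint32_t old_high = static_cast<uint32_t>(expected >> 32);
    uint32_t new_low = old_low + value_low;                           // add, SetCC
    uint32_t new_high = old_high + value_high + (new_low < old_low);  // adc
    desired = (static_cast<uint64_t>(new_high) << 32) | new_low;
    // compare_exchange_weak plays the role of strexd: it fails and the loop
    // retries if another thread touched the location since the load.
  } while (!addr->compare_exchange_weak(expected, desired));
  return {static_cast<uint32_t>(expected),         // OutputRegister(0): old low
          static_cast<uint32_t>(expected >> 32)};  // OutputRegister(1): old high
}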
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index a9f9be38ef..ca8684a375 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -11,264 +11,295 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDsbIsb) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4AddHoriz) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4AddHoriz) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLane) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSaturateS) \
- V(ArmI16x8AddHoriz) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSaturateS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSaturateU) \
- V(ArmI16x8SubSaturateU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLane) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSaturateS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSaturateS) \
- V(ArmI8x16Mul) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSaturateU) \
- V(ArmI8x16SubSaturateU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmS128Zero) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmS8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmS1x4AnyTrue) \
- V(ArmS1x4AllTrue) \
- V(ArmS1x8AnyTrue) \
- V(ArmS1x8AllTrue) \
- V(ArmS1x16AnyTrue) \
- V(ArmS1x16AllTrue)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDsbIsb) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4AddHoriz) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4AddHoriz) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLane) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddHoriz) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSaturateU) \
+ V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLane) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16Mul) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSaturateU) \
+ V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmS128Zero) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmS8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmS1x4AnyTrue) \
+ V(ArmS1x4AllTrue) \
+ V(ArmS1x8AnyTrue) \
+ V(ArmS1x8AllTrue) \
+ V(ArmS1x16AnyTrue) \
+ V(ArmS1x16AllTrue) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
+ V(ArmWord32AtomicPairCompareExchange) \
+ V(ArmWord64AtomicNarrowAddUint8) \
+ V(ArmWord64AtomicNarrowAddUint16) \
+ V(ArmWord64AtomicNarrowAddUint32) \
+ V(ArmWord64AtomicNarrowSubUint8) \
+ V(ArmWord64AtomicNarrowSubUint16) \
+ V(ArmWord64AtomicNarrowSubUint32) \
+ V(ArmWord64AtomicNarrowAndUint8) \
+ V(ArmWord64AtomicNarrowAndUint16) \
+ V(ArmWord64AtomicNarrowAndUint32) \
+ V(ArmWord64AtomicNarrowOrUint8) \
+ V(ArmWord64AtomicNarrowOrUint16) \
+ V(ArmWord64AtomicNarrowOrUint32) \
+ V(ArmWord64AtomicNarrowXorUint8) \
+ V(ArmWord64AtomicNarrowXorUint16) \
+ V(ArmWord64AtomicNarrowXorUint32) \
+ V(ArmWord64AtomicNarrowExchangeUint8) \
+ V(ArmWord64AtomicNarrowExchangeUint16) \
+ V(ArmWord64AtomicNarrowExchangeUint32) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint8) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint16) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint32)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index e538020f69..56ff02689a 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -49,6 +49,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmUxtab:
case kArmUxtah:
case kArmRbit:
+ case kArmRev:
case kArmAddPair:
case kArmSubPair:
case kArmMulPair:
@@ -263,6 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrsh:
case kArmLdr:
case kArmPeek:
+ case kArmWord32AtomicPairLoad:
return kIsLoadOperation;
case kArmVstrF32:
@@ -275,6 +277,35 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmPush:
case kArmPoke:
case kArmDsbIsb:
+ case kArmWord32AtomicPairStore:
+ case kArmWord32AtomicPairAdd:
+ case kArmWord32AtomicPairSub:
+ case kArmWord32AtomicPairAnd:
+ case kArmWord32AtomicPairOr:
+ case kArmWord32AtomicPairXor:
+ case kArmWord32AtomicPairExchange:
+ case kArmWord32AtomicPairCompareExchange:
+ case kArmWord64AtomicNarrowAddUint8:
+ case kArmWord64AtomicNarrowAddUint16:
+ case kArmWord64AtomicNarrowAddUint32:
+ case kArmWord64AtomicNarrowSubUint8:
+ case kArmWord64AtomicNarrowSubUint16:
+ case kArmWord64AtomicNarrowSubUint32:
+ case kArmWord64AtomicNarrowAndUint8:
+ case kArmWord64AtomicNarrowAndUint16:
+ case kArmWord64AtomicNarrowAndUint32:
+ case kArmWord64AtomicNarrowOrUint8:
+ case kArmWord64AtomicNarrowOrUint16:
+ case kArmWord64AtomicNarrowOrUint32:
+ case kArmWord64AtomicNarrowXorUint8:
+ case kArmWord64AtomicNarrowXorUint16:
+ case kArmWord64AtomicNarrowXorUint32:
+ case kArmWord64AtomicNarrowExchangeUint8:
+ case kArmWord64AtomicNarrowExchangeUint16:
+ case kArmWord64AtomicNarrowExchangeUint32:
+ case kArmWord64AtomicNarrowCompareExchangeUint8:
+ case kArmWord64AtomicNarrowCompareExchangeUint16:
+ case kArmWord64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 8fc5779112..277d9779c0 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -403,6 +403,46 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
+void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ ArmOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseUniqueRegister(value),
+ g.UseUniqueRegister(value_high),
+ g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
+ g.TempRegister(r7), g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ ArmOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -1100,7 +1140,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) {
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ VisitRR(this, kArmRev, node);
+}
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -2100,7 +2142,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2136,7 +2178,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2175,7 +2217,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2219,6 +2261,190 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r0),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r1)};
+ InstructionOperand temps[] = {g.TempRegister()};
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionCode code =
+ kArmWord32AtomicPairLoad | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value_low = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base), g.UseUniqueRegister(index),
+ g.UseFixed(value_low, r2), g.UseFixed(value_high, r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
+ g.TempRegister(r1)};
+ InstructionCode code =
+ kArmWord32AtomicPairStore | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAdd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairSub);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAnd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairOr);
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
+ ArchOpcode uint8_op,
+ ArchOpcode uint16_op,
+ ArchOpcode uint32_op) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitNarrowAtomicBinOp(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
+ VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8, \
+ kArmWord64AtomicNarrow##op##Uint16, \
+ kArmWord64AtomicNarrow##op##Uint32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseFixed(value, r0),
+ g.UseFixed(value_high, r1),
+ g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r6),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r7)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = kArmWord32AtomicPairExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {
+ g.UseFixed(node->InputAt(2), r4), g.UseFixed(node->InputAt(3), r5),
+ g.UseFixed(node->InputAt(4), r8), g.UseFixed(node->InputAt(5), r9),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = kArmWord32AtomicPairCompareExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(old_value),
+ g.UseUniqueRegister(new_value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
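A note on the "narrow" selectors above: VisitWord64AtomicNarrowBinop and the exchange/compare-exchange variants lower a 64-bit atomic whose memory operand is only 8, 16, or 32 bits wide. The real read-modify-write is the narrow operation, and the 64-bit result is materialized as two projections whose high word is forced to zero (that is what ATOMIC_NARROW_OP_CLEAR_HIGH_WORD does in the code generator). A minimal sketch of that shape, assuming a plain 32-bit fetch-add as the underlying operation (helper name made up for illustration):

#include <atomic>
#include <cstdint>
#include <utility>

// Sketch only: returns (projection 0, projection 1) = (old narrow value, 0).
std::pair<uint32_t, uint32_t> NarrowAtomicAddUint32(std::atomic<uint32_t>* addr,
                                                    uint32_t value) {
  uint32_t old_low = addr->fetch_add(value);  // the narrow atomic RMW itself
  uint32_t high = 0;                          // cleared high word of the result
  return {old_low, high};
}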
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index a7c5beee4c..867c3687a1 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -13,6 +13,7 @@
#include "src/frame-constants.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -1266,6 +1267,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Rbit32:
__ Rbit(i.OutputRegister32(), i.InputRegister32(0));
break;
+ case kArm64Rev:
+ __ Rev(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ case kArm64Rev32:
+ __ Rev(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
__ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
@@ -1346,12 +1353,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kArm64Float64Mod: {
- // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+ // TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL);
DCHECK(d0.is(i.InputDoubleRegister(0)));
DCHECK(d1.is(i.InputDoubleRegister(1)));
DCHECK(d0.is(i.OutputDoubleRegister()));
- // TODO(dcarney): make sure this saves all relevant registers.
+ // TODO(turbofan): make sure this saves all relevant registers.
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
break;
}
@@ -1414,35 +1421,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float32ToInt64:
__ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
if (i.OutputCount() > 1) {
- __ Mov(i.OutputRegister(1), 1);
- Label done;
- __ Cmp(i.OutputRegister(0), 1);
- __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
- __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
- vc);
- __ B(vc, &done);
+ // Check for inputs below INT64_MIN and NaN.
__ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
- __ Cset(i.OutputRegister(1), eq);
- __ Bind(&done);
+ // Check overflow.
+ // -1 value is used to indicate a possible overflow which will occur
+ // when subtracting (-1) from the provided INT64_MAX operand.
+ // OutputRegister(1) is set to 0 if the input was out of range or NaN.
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
+ __ Cset(i.OutputRegister(1), vc);
}
break;
case kArm64Float64ToInt64:
__ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
- __ Mov(i.OutputRegister(1), 1);
- Label done;
- __ Cmp(i.OutputRegister(0), 1);
- __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
- __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
- __ B(vc, &done);
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
- __ Cset(i.OutputRegister(1), eq);
- __ Bind(&done);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
+ __ Cset(i.OutputRegister(1), vc);
}
break;
case kArm64Float32ToUint64:
__ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
if (i.OutputCount() > 1) {
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputFloat32Register(0), -1.0);
__ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
__ Cset(i.OutputRegister(1), ne);
@@ -1451,6 +1452,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float64ToUint64:
__ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputDoubleRegister(0), -1.0);
__ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
__ Cset(i.OutputRegister(1), ne);
@@ -2580,10 +2582,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
- __ Move(dst, src_object);
+ __ Mov(dst, src_object);
}
- } else if (src.type() == Constant::kExternalReference) {
- __ Mov(dst, src.ToExternalReference());
} else {
__ Mov(dst, g.ToImmediate(source));
}
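The reworked Float32ToInt64/Float64ToInt64 sequences above replace the old multi-branch overflow check with a single Fcmp against INT64_MIN followed by Ccmp/Cset, so the second output register ends up as 1 exactly when the conversion saw neither overflow nor NaN. A rough C++ model of that success bit (illustrative only: the helper name and the explicit range test are assumptions, the generated code derives the same answer from condition flags):

#include <cmath>
#include <cstdint>
#include <limits>
#include <utility>

// Returns (converted value, success flag), like the two outputs of
// kArm64Float64ToInt64 when OutputCount() > 1.
std::pair<int64_t, int> Float64ToInt64WithCheck(double input) {
  bool ok = !std::isnan(input) &&
            input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
            input < static_cast<double>(std::numeric_limits<int64_t>::max());
  // Fcvtzs saturates out-of-range inputs in hardware; the sketch only casts
  // when the C++ conversion is well defined.
  int64_t result = ok ? static_cast<int64_t>(input) : 0;
  return {result, ok ? 1 : 0};
}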
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index ce73515321..7b119c8fe7 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -77,6 +77,8 @@ namespace compiler {
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 4ea251c590..d443bd7641 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -79,6 +79,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Bfi:
case kArm64Rbit:
case kArm64Rbit32:
+ case kArm64Rev:
+ case kArm64Rev32:
case kArm64Float32Cmp:
case kArm64Float32Add:
case kArm64Float32Sub:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index e07debf9ec..b2e8b4b205 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -1189,6 +1189,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Word32Clz, kArm64Clz32) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
+ V(Word32ReverseBytes, kArm64Rev32) \
+ V(Word64ReverseBytes, kArm64Rev) \
V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64) \
V(RoundInt32ToFloat32, kArm64Int32ToFloat32) \
V(RoundUint32ToFloat32, kArm64Uint32ToFloat32) \
@@ -1272,10 +1274,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
@@ -2082,23 +2080,42 @@ void VisitWord64Test(InstructionSelector* selector, Node* node,
VisitWordTest(selector, node, kArm64Tst, cont);
}
-template <typename Matcher, ArchOpcode kOpcode>
-bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
- Matcher m(node);
- if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
- base::bits::IsPowerOfTwo(m.right().Value())) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
- selector->EmitWithContinuation(
- kOpcode, g.UseRegister(m.left().node()),
- g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
- cont);
- return true;
+template <typename Matcher>
+struct TestAndBranchMatcher {
+ TestAndBranchMatcher(Node* node, FlagsContinuation* cont)
+ : matches_(false), cont_(cont), matcher_(node) {
+ Initialize();
+ }
+ bool Matches() const { return matches_; }
+
+ unsigned bit() const {
+ DCHECK(Matches());
+ return base::bits::CountTrailingZeros(matcher_.right().Value());
}
- return false;
-}
+
+ Node* input() const {
+ DCHECK(Matches());
+ return matcher_.left().node();
+ }
+
+ private:
+ bool matches_;
+ FlagsContinuation* cont_;
+ Matcher matcher_;
+
+ void Initialize() {
+ if (cont_->IsBranch() && !cont_->IsPoisoned() &&
+ matcher_.right().HasValue() &&
+ base::bits::IsPowerOfTwo(matcher_.right().Value())) {
+ // If the mask has only one bit set, we can use tbz/tbnz.
+ DCHECK((cont_->condition() == kEqual) ||
+ (cont_->condition() == kNotEqual));
+ matches_ = true;
+ } else {
+ matches_ = false;
+ }
+ }
+};
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -2228,6 +2245,58 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
cont->Negate();
}
+ // Try to match bit checks to create TBZ/TBNZ instructions.
+ // Unlike the switch below, CanCover check is not needed here.
+ // If there are several uses of the given operation, we will generate a TBZ
+ // instruction for each. This is useful even if there are other uses of the
+ // arithmetic result, because it moves dependencies further back.
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Equal: {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ Node* const left = m.left().node();
+ if (left->opcode() == IrOpcode::kWord64And) {
+ // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
+ // into a tbz/tbnz instruction.
+ TestAndBranchMatcher<Uint64BinopMatcher> tbm(left, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ this->EmitWithContinuation(kArm64TestAndBranch,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ }
+ }
+ break;
+ }
+ case IrOpcode::kWord32And: {
+ TestAndBranchMatcher<Uint32BinopMatcher> tbm(value, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ this->EmitWithContinuation(kArm64TestAndBranch32,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ break;
+ }
+ case IrOpcode::kWord64And: {
+ TestAndBranchMatcher<Uint64BinopMatcher> tbm(value, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ this->EmitWithContinuation(kArm64TestAndBranch,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
@@ -2251,12 +2320,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (m.right().Is(0)) {
Node* const left = m.left().node();
if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
- // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
- // into a tbz/tbnz instruction.
- if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- this, left, cont)) {
- return;
- }
return VisitWordCompare(this, left, kArm64Tst, cont, true,
kLogical64Imm);
}
@@ -2353,17 +2416,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
case IrOpcode::kInt32Sub:
return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
- if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
- this, value, cont)) {
- return;
- }
return VisitWordCompare(this, value, kArm64Tst32, cont, true,
kLogical32Imm);
case IrOpcode::kWord64And:
- if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- this, value, cont)) {
- return;
- }
return VisitWordCompare(this, value, kArm64Tst, cont, true,
kLogical64Imm);
default:
@@ -2742,7 +2797,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2762,7 +2817,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2780,7 +2835,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2800,7 +2855,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2820,7 +2875,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2856,7 +2911,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Uint16()) {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index ebec8161ba..77f88502c3 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -40,27 +40,23 @@ static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
- void* ptr) {
- return kPointerSize == 8
- ? common->Int64Constant(reinterpret_cast<intptr_t>(ptr))
- : common->Int32Constant(
- static_cast<int32_t>(reinterpret_cast<intptr_t>(ptr)));
+ intptr_t ptr) {
+ return kPointerSize == 8 ? common->Int64Constant(ptr)
+ : common->Int32Constant(static_cast<int32_t>(ptr));
}
BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
Isolate* isolate) {
+  // Basic block profiling disables concurrent compilation, so handle
+  // dereference is fine.
+ AllowHandleDereference allow_handle_dereference;
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
- BasicBlockProfiler::Data* data =
- isolate->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
+ BasicBlockProfiler::Data* data = BasicBlockProfiler::Get()->NewData(n_blocks);
// Set the function name.
- if (info->has_shared_info()) {
- std::ostringstream os;
- info->shared_info()->Name()->PrintUC16(os);
- data->SetFunctionName(&os);
- }
+ data->SetFunctionName(info->GetDebugName());
// Capture the schedule string before instrumentation.
{
std::ostringstream os;
@@ -77,7 +73,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
++it, ++block_number) {
BasicBlock* block = (*it);
- data->SetBlockId(block_number, block->id().ToSize());
+ data->SetBlockRpoNumber(block_number, block->rpo_number());
// TODO(dcarney): wire effect and control deps for load and store.
// Construct increment operation.
Node* base = graph->NewNode(
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 2d16ba525c..bd8b551f4f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,6 +14,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/literal-objects-inl.h"
#include "src/vector-slot-pair.h"
@@ -1624,6 +1626,18 @@ void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
environment()->BindAccumulator(literal);
}
+void BytecodeGraphBuilder::VisitCloneObject() {
+ PrepareEagerCheckpoint();
+ Node* source =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ int flags = bytecode_iterator().GetFlagOperand(1);
+ int slot = bytecode_iterator().GetIndexOperand(2);
+ const Operator* op =
+ javascript()->CloneObject(CreateVectorSlotPair(slot), flags);
+ Node* value = NewNode(op, source);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitGetTemplateObject() {
Handle<TemplateObjectDescription> description(
TemplateObjectDescription::cast(
@@ -2439,19 +2453,15 @@ void BytecodeGraphBuilder::VisitTestReferenceEqual() {
environment()->BindAccumulator(result);
}
-void BytecodeGraphBuilder::BuildTestingOp(const Operator* op) {
+void BytecodeGraphBuilder::VisitTestIn() {
PrepareEagerCheckpoint();
- Node* left =
+ Node* object = environment()->LookupAccumulator();
+ Node* key =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* right = environment()->LookupAccumulator();
- Node* node = NewNode(op, left, right);
+ Node* node = NewNode(javascript()->HasProperty(), object, key);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitTestIn() {
- BuildTestingOp(javascript()->HasProperty());
-}
-
void BytecodeGraphBuilder::VisitTestInstanceOf() {
int const slot_index = bytecode_iterator().GetIndexOperand(1);
BuildCompareOp(javascript()->InstanceOf(CreateVectorSlotPair(slot_index)));
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index a94a3d79af..57127142de 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -93,6 +93,12 @@ class BytecodeGraphBuilder {
return MakeNode(op, arraysize(buffer), buffer, false);
}
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* buffer[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
@@ -178,7 +184,6 @@ class BytecodeGraphBuilder {
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
- void BuildTestingOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 0b77d10072..4f400846d4 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -1378,12 +1378,64 @@ void CodeAssembler::GotoIfNot(SloppyTNode<IntegralT> condition,
void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ if ((true_label->is_used() || true_label->is_bound()) &&
+ (false_label->is_used() || false_label->is_bound())) {
+ return Goto(constant ? true_label : false_label);
+ }
+ }
true_label->MergeVariables();
false_label->MergeVariables();
return raw_assembler()->Branch(condition, true_label->label_,
false_label->label_);
}
+void CodeAssembler::Branch(TNode<BoolT> condition,
+ std::function<void()> true_body,
+ std::function<void()> false_body) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? true_body() : false_body();
+ }
+
+ Label vtrue(this), vfalse(this);
+ Branch(condition, &vtrue, &vfalse);
+
+ Bind(&vtrue);
+ true_body();
+
+ Bind(&vfalse);
+ false_body();
+}
+
+void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label,
+ std::function<void()> false_body) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? Goto(true_label) : false_body();
+ }
+
+ Label vfalse(this);
+ Branch(condition, true_label, &vfalse);
+ Bind(&vfalse);
+ false_body();
+}
+
+void CodeAssembler::Branch(TNode<BoolT> condition,
+ std::function<void()> true_body,
+ Label* false_label) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? true_body() : Goto(false_label);
+ }
+
+ Label vtrue(this);
+ Branch(condition, &vtrue, false_label);
+ Bind(&vtrue);
+ true_body();
+}
+
void CodeAssembler::Switch(Node* index, Label* default_label,
const int32_t* case_values, Label** case_labels,
size_t case_count) {
@@ -1685,8 +1737,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
} // namespace compiler
-Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
- String* location) {
+Smi* CheckObjectType(Object* value, Smi* type, String* location) {
#ifdef DEBUG
const char* expected;
switch (static_cast<ObjectType>(type->value())) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 6419140a74..6b9089da6b 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -278,8 +278,7 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
#undef OBJECT_TYPE_STRUCT_CASE
#undef OBJECT_TYPE_TEMPLATE_CASE
-Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
- String* location);
+Smi* CheckObjectType(Object* value, Smi* type, String* location);
namespace compiler {
@@ -440,18 +439,22 @@ class SloppyTNode : public TNode<T> {
V(Float64LessThanOrEqual, BoolT, Float64T, Float64T) \
V(Float64GreaterThan, BoolT, Float64T, Float64T) \
V(Float64GreaterThanOrEqual, BoolT, Float64T, Float64T) \
+ /* Use Word32Equal if you need Int32Equal */ \
V(Int32GreaterThan, BoolT, Word32T, Word32T) \
V(Int32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
V(Int32LessThan, BoolT, Word32T, Word32T) \
V(Int32LessThanOrEqual, BoolT, Word32T, Word32T) \
+ /* Use WordEqual if you need IntPtrEqual */ \
V(IntPtrLessThan, BoolT, WordT, WordT) \
V(IntPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(IntPtrGreaterThan, BoolT, WordT, WordT) \
V(IntPtrGreaterThanOrEqual, BoolT, WordT, WordT) \
+ /* Use Word32Equal if you need Uint32Equal */ \
V(Uint32LessThan, BoolT, Word32T, Word32T) \
V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \
V(Uint32GreaterThan, BoolT, Word32T, Word32T) \
V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
+ /* Use WordEqual if you need UintPtrEqual */ \
V(UintPtrLessThan, BoolT, WordT, WordT) \
V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(UintPtrGreaterThan, BoolT, WordT, WordT) \
@@ -535,12 +538,12 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTiesEven, Float64T, Float64T) \
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
- V(Word32Not, Word32T, Word32T) \
+ V(Word32BitwiseNot, Word32T, Word32T) \
V(WordNot, WordT, WordT) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
- V(Word32BinaryNot, Word32T, Word32T)
+ V(Word32BinaryNot, BoolT, Word32T)
// A "public" interface used by components outside of compiler directory to
// create code objects with TurboFan's backend. This class is mostly a thin
@@ -623,12 +626,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
Node* function = code_assembler_->ExternalConstant(
ExternalReference::check_object_type());
- Node* const isolate_ptr = code_assembler_->ExternalConstant(
- ExternalReference::isolate_address(code_assembler_->isolate()));
- code_assembler_->CallCFunction4(
- MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::AnyTagged(), MachineType::TaggedSigned(),
- MachineType::AnyTagged(), function, isolate_ptr, node_,
+ code_assembler_->CallCFunction3(
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged(), function,
+ node_,
code_assembler_->SmiConstant(
static_cast<int>(ObjectTypeOf<A>::value)),
code_assembler_->StringConstant(location_));
@@ -670,12 +671,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return TNode<T>::UncheckedCast(value);
}
- CheckedNode<Object, false> Cast(Node* value, const char* location) {
+ CheckedNode<Object, false> Cast(Node* value, const char* location = "") {
return {value, this, location};
}
template <class T>
- CheckedNode<T, true> Cast(TNode<T> value, const char* location) {
+ CheckedNode<T, true> Cast(TNode<T> value, const char* location = "") {
return {value, this, location};
}
@@ -685,7 +686,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define CAST(x) \
Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__))
#else
-#define CAST(x) Cast(x, "")
+#define CAST(x) Cast(x)
#endif
#ifdef DEBUG
@@ -772,6 +773,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label);
+ void Branch(TNode<BoolT> condition, std::function<void()> true_body,
+ std::function<void()> false_body);
+ void Branch(TNode<BoolT> condition, Label* true_label,
+ std::function<void()> false_body);
+ void Branch(TNode<BoolT> condition, std::function<void()> true_body,
+ Label* false_label);
+
void Switch(Node* index, Label* default_label, const int32_t* case_values,
Label** case_labels, size_t case_count);
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 6a7d0985f4..83060f9e38 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -42,9 +42,8 @@ CodeGenerator::CodeGenerator(
Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper, int start_source_position,
- JumpOptimizationInfo* jump_opt, WasmCompilationData* wasm_compilation_data,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- int32_t builtin_index)
+ JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level,
+ const AssemblerOptions& options, int32_t builtin_index)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -75,7 +74,7 @@ CodeGenerator::CodeGenerator(
optimized_out_literal_id_(-1),
source_position_table_builder_(
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
- wasm_compilation_data_(wasm_compilation_data),
+ protected_instructions_(zone()),
result_(kSuccess),
poisoning_level_(poisoning_level),
block_starts_(zone()),
@@ -86,24 +85,25 @@ CodeGenerator::CodeGenerator(
CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt);
- Code::Kind code_kind = info_->code_kind();
+ Code::Kind code_kind = info->code_kind();
if (code_kind == Code::WASM_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
- code_kind == Code::WASM_INTERPRETER_ENTRY) {
- tasm_.set_trap_on_abort(true);
+ code_kind == Code::WASM_INTERPRETER_ENTRY ||
+ (Builtins::IsBuiltinId(builtin_index) &&
+ Builtins::IsWasmRuntimeStub(builtin_index))) {
+ tasm_.set_abort_hard(true);
}
tasm_.set_builtin_index(builtin_index);
}
bool CodeGenerator::wasm_runtime_exception_support() const {
- DCHECK(wasm_compilation_data_);
- return wasm_compilation_data_->runtime_exception_support();
+ DCHECK_NOT_NULL(info_);
+ return info_->wasm_runtime_exception_support();
}
void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
uint32_t landing_offset) {
- DCHECK_NOT_NULL(wasm_compilation_data_);
- wasm_compilation_data_->AddProtectedInstruction(instr_offset, landing_offset);
+ protected_instructions_.push_back({instr_offset, landing_offset});
}
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
@@ -372,6 +372,12 @@ OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
+OwnedVector<trap_handler::ProtectedInstructionData>
+CodeGenerator::GetProtectedInstructions() {
+ return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
+ protected_instructions_);
+}
+
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration();
@@ -439,10 +445,10 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
// we also don't need to worry about them, since the GC has special
// knowledge about those fields anyway.
if (index < stackSlotToSpillSlotDelta) continue;
- safepoint.DefinePointerSlot(index, zone());
+ safepoint.DefinePointerSlot(index);
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
Register reg = LocationOperand::cast(operand).GetRegister();
- safepoint.DefinePointerRegister(reg, zone());
+ safepoint.DefinePointerRegister(reg);
}
}
}
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index dcdb6bb806..5d4941f825 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -14,6 +14,7 @@
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
#include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
@@ -27,7 +28,6 @@ class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;
-class WasmCompilationData;
struct BranchInfo {
FlagsCondition condition;
@@ -83,7 +83,6 @@ class CodeGenerator final : public GapResolver::Assembler {
base::Optional<OsrHelper> osr_helper,
int start_source_position,
JumpOptimizationInfo* jump_opt,
- WasmCompilationData* wasm_compilation_data,
PoisoningMitigationLevel poisoning_level,
const AssemblerOptions& options,
int32_t builtin_index);
@@ -95,6 +94,8 @@ class CodeGenerator final : public GapResolver::Assembler {
MaybeHandle<Code> FinalizeCode();
OwnedVector<byte> GetSourcePositionTable();
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ GetProtectedInstructions();
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -427,7 +428,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
- WasmCompilationData* wasm_compilation_data_;
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 6e50d700b7..16a9096079 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -19,7 +19,7 @@ namespace compiler {
namespace {
-Decision DecideCondition(const JSHeapBroker* broker, Node* const cond) {
+Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
@@ -38,7 +38,7 @@ Decision DecideCondition(const JSHeapBroker* broker, Node* const cond) {
} // namespace
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
- const JSHeapBroker* js_heap_broker,
+ JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
Zone* temp_zone)
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 77a1d71084..f1b29eaf76 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
- const JSHeapBroker* js_heap_broker,
+ JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final {}
@@ -48,13 +48,13 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
Graph* graph() const { return graph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
Node* dead() const { return dead_; }
Graph* const graph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 23f1cdfc1d..9bdaedea20 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -5,12 +5,12 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/frame-states.h"
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/reloc-info.h"
#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index a672d0a1f0..b67adbd7ca 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -16,22 +16,18 @@ CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
class CompilationDependencies::Dependency : public ZoneObject {
public:
- virtual bool IsSane() const = 0;
virtual bool IsValid() const = 0;
- virtual void Install(Isolate* isolate, Handle<WeakCell> code) = 0;
+ virtual void Install(MaybeObjectHandle code) = 0;
};
class InitialMapDependency final : public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the initial map.
InitialMapDependency(const JSFunctionRef& function, const MapRef& initial_map)
: function_(function), initial_map_(initial_map) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- CHECK(function_.has_initial_map());
- return function_.initial_map().equals(initial_map_);
+ DCHECK(function_.has_initial_map());
+ DCHECK(function_.initial_map().equals(initial_map_));
}
bool IsValid() const override {
@@ -40,9 +36,10 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
function->initial_map() == *initial_map_.object<Map>();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, initial_map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(function_.isolate(), code,
+ initial_map_.object<Map>(),
DependentCode::kInitialMapChangedGroup);
}
@@ -51,22 +48,51 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
MapRef initial_map_;
};
-class StableMapDependency final : public CompilationDependencies::Dependency {
+class PrototypePropertyDependency final
+ : public CompilationDependencies::Dependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {
- DCHECK(IsSane());
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the prototype.
+ PrototypePropertyDependency(const JSFunctionRef& function,
+ const ObjectRef& prototype)
+ : function_(function), prototype_(prototype) {
+ DCHECK(function_.has_prototype());
+ DCHECK(!function_.PrototypeRequiresRuntimeLookup());
+ DCHECK(function_.prototype().equals(prototype_));
+ }
+
+ bool IsValid() const override {
+ Handle<JSFunction> function = function_.object<JSFunction>();
+ return function->has_prototype_slot() && function->has_prototype() &&
+ !function->PrototypeRequiresRuntimeLookup() &&
+ function->prototype() == *prototype_.object();
+ }
+
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ Handle<JSFunction> function = function_.object<JSFunction>();
+ if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> initial_map(function->initial_map(), function_.isolate());
+ DependentCode::InstallDependency(function_.isolate(), code, initial_map,
+ DependentCode::kInitialMapChangedGroup);
}
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return map_.is_stable();
+ private:
+ JSFunctionRef function_;
+ ObjectRef prototype_;
+};
+
+class StableMapDependency final : public CompilationDependencies::Dependency {
+ public:
+ explicit StableMapDependency(const MapRef& map) : map_(map) {
+ DCHECK(map_.is_stable());
}
bool IsValid() const override { return map_.object<Map>()->is_stable(); }
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kPrototypeCheckGroup);
}
@@ -77,19 +103,14 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
class TransitionDependency final : public CompilationDependencies::Dependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return !map_.is_deprecated();
+ DCHECK(!map_.is_deprecated());
}
bool IsValid() const override { return !map_.object<Map>()->is_deprecated(); }
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kTransitionGroup);
}
@@ -100,24 +121,21 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
class PretenureModeDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the mode.
PretenureModeDependency(const AllocationSiteRef& site, PretenureFlag mode)
: site_(site), mode_(mode) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return mode_ == site_.GetPretenureMode();
+ DCHECK_EQ(mode_, site_.GetPretenureMode());
}
bool IsValid() const override {
return mode_ == site_.object<AllocationSite>()->GetPretenureMode();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- isolate, code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object<AllocationSite>(),
DependentCode::kAllocationSiteTenuringChangedGroup);
}
@@ -128,16 +146,13 @@ class PretenureModeDependency final
class FieldTypeDependency final : public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the type.
FieldTypeDependency(const MapRef& owner, int descriptor,
- const FieldTypeRef& type)
+ const ObjectRef& type)
: owner_(owner), descriptor_(descriptor), type_(type) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- CHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
- return type_.equals(owner_.GetFieldType(descriptor_));
+ DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
+ DCHECK(type_.equals(owner_.GetFieldType(descriptor_)));
}
bool IsValid() const override {
@@ -147,31 +162,29 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, owner_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(owner_.isolate(), code,
+ owner_.object<Map>(),
DependentCode::kFieldOwnerGroup);
}
private:
MapRef owner_;
int descriptor_;
- FieldTypeRef type_;
+ ObjectRef type_;
};
class GlobalPropertyDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the type and the read_only flag.
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
: cell_(cell), type_(type), read_only_(read_only) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return type_ == cell_.property_details().cell_type() &&
- read_only_ == cell_.property_details().IsReadOnly();
+ DCHECK_EQ(type_, cell_.property_details().cell_type());
+ DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
bool IsValid() const override {
@@ -180,9 +193,9 @@ class GlobalPropertyDependency final
read_only_ == cell->property_details().IsReadOnly();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code,
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -196,13 +209,7 @@ class GlobalPropertyDependency final
class ProtectorDependency final : public CompilationDependencies::Dependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return cell_.value().IsSmi() &&
- cell_.value().AsSmi() == Isolate::kProtectorValid;
+ DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
}
bool IsValid() const override {
@@ -210,9 +217,9 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code,
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -224,18 +231,14 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
class ElementsKindDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the elements kind.
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
: site_(site), kind_(kind) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
DCHECK(AllocationSite::ShouldTrack(kind_));
- ElementsKind kind = site_.PointsToLiteral()
- ? site_.boilerplate().GetElementsKind()
- : site_.GetElementsKind();
- return kind_ == kind;
+ DCHECK_EQ(kind_, site_.PointsToLiteral()
+ ? site_.boilerplate().value().GetElementsKind()
+ : site_.GetElementsKind());
}
bool IsValid() const override {
@@ -246,10 +249,10 @@ class ElementsKindDependency final
return kind_ == kind;
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- isolate, code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object<AllocationSite>(),
DependentCode::kAllocationSiteTransitionChangedGroup);
}
@@ -258,6 +261,33 @@ class ElementsKindDependency final
ElementsKind kind_;
};
+class InitialMapInstanceSizePredictionDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
+ int instance_size)
+ : function_(function), instance_size_(instance_size) {}
+
+ bool IsValid() const override {
+ // The dependency is valid if the prediction is the same as the current
+ // slack tracking result.
+ int instance_size =
+ function_.object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
+ function_.isolate());
+ return instance_size == instance_size_;
+ }
+
+ void Install(MaybeObjectHandle code) override {
+ DCHECK(IsValid());
+ // Finish the slack tracking.
+ function_.object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
+ }
+
+ private:
+ JSFunctionRef function_;
+ int instance_size_;
+};
+
MapRef CompilationDependencies::DependOnInitialMap(
const JSFunctionRef& function) {
MapRef map = function.initial_map();
@@ -265,6 +295,14 @@ MapRef CompilationDependencies::DependOnInitialMap(
return map;
}
+ObjectRef CompilationDependencies::DependOnPrototypeProperty(
+ const JSFunctionRef& function) {
+ ObjectRef prototype = function.prototype();
+ dependencies_.push_front(
+ new (zone_) PrototypePropertyDependency(function, prototype));
+ return prototype;
+}
+
void CompilationDependencies::DependOnStableMap(const MapRef& map) {
if (map.CanTransition()) {
dependencies_.push_front(new (zone_) StableMapDependency(map));
@@ -291,7 +329,7 @@ PretenureFlag CompilationDependencies::DependOnPretenureMode(
void CompilationDependencies::DependOnFieldType(const MapRef& map,
int descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
- FieldTypeRef type = owner.GetFieldType(descriptor);
+ ObjectRef type = owner.GetFieldType(descriptor);
DCHECK(type.equals(map.GetFieldType(descriptor)));
dependencies_.push_front(new (zone_)
FieldTypeDependency(owner, descriptor, type));
@@ -313,7 +351,7 @@ void CompilationDependencies::DependOnElementsKind(
const AllocationSiteRef& site) {
// Do nothing if the object doesn't have any useful element transitions left.
ElementsKind kind = site.PointsToLiteral()
- ? site.boilerplate().GetElementsKind()
+ ? site.boilerplate().value().GetElementsKind()
: site.GetElementsKind();
if (AllocationSite::ShouldTrack(kind)) {
dependencies_.push_front(new (zone_) ElementsKindDependency(site, kind));
@@ -328,25 +366,28 @@ bool CompilationDependencies::AreValid() const {
}
bool CompilationDependencies::Commit(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
-
- // Check validity of all dependencies first, such that we can abort before
- // installing anything.
+  // Check validity of all dependencies first, so that we can avoid installing
+  // anything when there's already an invalid dependency.
if (!AreValid()) {
dependencies_.clear();
return false;
}
- Handle<WeakCell> cell = Code::WeakCellFor(code);
for (auto dep : dependencies_) {
- dep->Install(isolate, cell);
+ // Check each dependency's validity again right before installing it,
+ // because a GC can trigger invalidation for some dependency kinds.
+ if (!dep->IsValid()) {
+ dependencies_.clear();
+ return false;
+ }
+ dep->Install(MaybeObjectHandle::Weak(code));
}
dependencies_.clear();
return true;
}
namespace {
-void DependOnStablePrototypeChain(const JSHeapBroker* broker,
+void DependOnStablePrototypeChain(JSHeapBroker* broker,
CompilationDependencies* deps,
Handle<Map> map,
MaybeHandle<JSReceiver> last_prototype) {
@@ -364,7 +405,7 @@ void DependOnStablePrototypeChain(const JSHeapBroker* broker,
} // namespace
void CompilationDependencies::DependOnStablePrototypeChains(
- const JSHeapBroker* broker, Handle<Context> native_context,
+ JSHeapBroker* broker, Handle<Context> native_context,
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
Isolate* isolate = holder->GetIsolate();
// Determine actual holder and perform prototype chain checks.
@@ -391,6 +432,28 @@ void CompilationDependencies::DependOnElementsKinds(
CHECK_EQ(current.nested_site().AsSmi(), 0);
}
+SlackTrackingPrediction::SlackTrackingPrediction(MapRef initial_map,
+ int instance_size)
+ : instance_size_(instance_size),
+ inobject_property_count_(
+ (instance_size >> kPointerSizeLog2) -
+ initial_map.GetInObjectPropertiesStartInWords()) {}
+
+SlackTrackingPrediction
+CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
+ const JSFunctionRef& function) {
+ MapRef initial_map = DependOnInitialMap(function);
+ int instance_size = function.InitialMapInstanceSizeWithMinSlack();
+ // Currently, we always install the prediction dependency. If this turns out
+  // to be too expensive, we can instead install the dependency only when
+  // slack tracking is active.
+ dependencies_.push_front(
+ new (zone_)
+ InitialMapInstanceSizePredictionDependency(function, instance_size));
+ DCHECK_LE(instance_size, function.initial_map().instance_size());
+ return SlackTrackingPrediction(initial_map, instance_size);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 9770775c2e..5d4cd221df 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -13,6 +13,18 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SlackTrackingPrediction {
+ public:
+ SlackTrackingPrediction(MapRef initial_map, int instance_size);
+
+ int inobject_property_count() const { return inobject_property_count_; }
+ int instance_size() const { return instance_size_; }
+
+ private:
+ int instance_size_;
+ int inobject_property_count_;
+};
+
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
@@ -21,9 +33,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
// Return the initial map of {function} and record the assumption that it
- // stays the intial map.
+ // stays the initial map.
MapRef DependOnInitialMap(const JSFunctionRef& function);
+ // Return the "prototype" property of the given function and record the
+ // assumption that it doesn't change.
+ ObjectRef DependOnPrototypeProperty(const JSFunctionRef& function);
+
// Record the assumption that {map} stays stable.
void DependOnStableMap(const MapRef& map);
@@ -53,12 +69,20 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// {receiver_type} up to (and including) the {holder}.
// TODO(neis): Fully brokerize!
void DependOnStablePrototypeChains(
- const JSHeapBroker* broker, Handle<Context> native_context,
+ JSHeapBroker* broker, Handle<Context> native_context,
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder);
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
+ // Predict the final instance size for {function}'s initial map and record
+ // the assumption that this prediction is correct. In addition, register
+  // the initial map dependency. This method returns the {function}'s
+  // predicted minimum slack instance size (wrapped together with
+ // the corresponding in-object property count for convenience).
+ SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(
+ const JSFunctionRef& function);
+
// Exposed only for testing purposes.
bool AreValid() const;
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 1811c06f98..a447b2a07c 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -11,8 +11,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-ConstantFoldingReducer::ConstantFoldingReducer(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index b111e5b878..3fbe5c4c2e 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -18,7 +18,7 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~ConstantFoldingReducer() final;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -27,10 +27,10 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
private:
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 9a3a293055..9b12c022c4 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -1631,6 +1631,32 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
return value;
}
+Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
+ auto if_rhs_power_of_two = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // Compute the mask for the {rhs}.
+ Node* one = __ Int32Constant(1);
+ Node* msk = __ Int32Sub(rhs, one);
+
+ // Check if the {rhs} is a power of two.
+ __ GotoIf(__ Word32Equal(__ Word32And(rhs, msk), __ Int32Constant(0)),
+ &if_rhs_power_of_two);
+ {
+ // The {rhs} is not a power of two, do a generic Uint32Mod.
+ __ Goto(&done, __ Uint32Mod(lhs, rhs));
+ }
+
+ __ Bind(&if_rhs_power_of_two);
+ {
+ // The {rhs} is a power of two, just do a fast bit masking.
+ __ Goto(&done, __ Word32And(lhs, msk));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* frame_state) {
// General case for signed integer modulus, with optimization for (unknown)
@@ -1639,12 +1665,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// if rhs <= 0 then
// rhs = -rhs
// deopt if rhs == 0
+ // let msk = rhs - 1 in
// if lhs < 0 then
- // let res = lhs % rhs in
- // deopt if res == 0
- // res
+  //     let lhs_abs = -lhs in
+ // let res = if rhs & msk == 0 then
+ // lhs_abs & msk
+ // else
+ // lhs_abs % rhs in
+ // if lhs < 0 then
+ // deopt if res == 0
+ // -res
+ // else
+ // res
// else
- // let msk = rhs - 1 in
// if rhs & msk == 0 then
// lhs & msk
// else
@@ -1655,7 +1688,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
auto if_rhs_not_positive = __ MakeDeferredLabel();
auto if_lhs_negative = __ MakeDeferredLabel();
- auto if_power_of_two = __ MakeLabel();
+ auto if_rhs_power_of_two = __ MakeLabel();
auto rhs_checked = __ MakeLabel(MachineRepresentation::kWord32);
auto done = __ MakeLabel(MachineRepresentation::kWord32);
@@ -1673,45 +1706,29 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* vtrue0 = __ Int32Sub(zero, rhs);
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = __ Word32Equal(vtrue0, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(),
+ __ Word32Equal(vtrue0, zero), frame_state);
__ Goto(&rhs_checked, vtrue0);
}
__ Bind(&rhs_checked);
rhs = rhs_checked.PhiAt(0);
- // Check if {lhs} is negative.
- Node* check1 = __ Int32LessThan(lhs, zero);
- __ GotoIf(check1, &if_lhs_negative);
-
- // {lhs} non-negative.
+ __ GotoIf(__ Int32LessThan(lhs, zero), &if_lhs_negative);
{
- Node* one = __ Int32Constant(1);
- Node* msk = __ Int32Sub(rhs, one);
-
- // Check if {rhs} minus one is a valid mask.
- Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
- __ GotoIf(check2, &if_power_of_two);
- // Compute the remainder using the generic {lhs % rhs}.
- __ Goto(&done, __ Int32Mod(lhs, rhs));
-
- __ Bind(&if_power_of_two);
- // Compute the remainder using {lhs & msk}.
- __ Goto(&done, __ Word32And(lhs, msk));
+ // The {lhs} is a non-negative integer.
+ __ Goto(&done, BuildUint32Mod(lhs, rhs));
}
__ Bind(&if_lhs_negative);
{
- // Compute the remainder using {lhs % msk}.
- Node* vtrue1 = __ Int32Mod(lhs, rhs);
+ // The {lhs} is a negative integer.
+ Node* res = BuildUint32Mod(__ Int32Sub(zero, lhs), rhs);
// Check if we would have to return -0.
- Node* check = __ Word32Equal(vtrue1, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
- frame_state);
- __ Goto(&done, vtrue1);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
+ __ Word32Equal(res, zero), frame_state);
+ __ Goto(&done, __ Int32Sub(zero, res));
}
__ Bind(&done);
@@ -1753,7 +1770,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
frame_state);
// Perform the actual unsigned integer modulus.
- return __ Uint32Mod(lhs, rhs);
+ return BuildUint32Mod(lhs, rhs);
}
Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
@@ -3293,16 +3310,17 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
// with allow-return-hole, we cannot do anything, so just deoptimize
- // in case of the hole NaN (similar to Crankshaft).
+ // in case of the hole NaN.
+ CheckFloat64HoleParameters const& params =
+ CheckFloat64HoleParametersOf(node->op());
Node* value = node->InputAt(0);
Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
__ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
frame_state);
return value;
}
-
Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -3752,350 +3770,153 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
- ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
- Node* buffer = node->InputAt(0);
- Node* storage = node->InputAt(1);
- Node* index = node->InputAt(2);
- Node* is_little_endian = node->InputAt(3);
-
- // We need to keep the {buffer} alive so that the GC will not release the
- // ArrayBuffer (if there's any) as long as we are still operating on it.
- __ Retain(buffer);
-
- ElementAccess access_int8 = AccessBuilder::ForTypedArrayElement(
- kExternalInt8Array, true, LoadSensitivity::kCritical);
- ElementAccess access_uint8 = AccessBuilder::ForTypedArrayElement(
- kExternalUint8Array, true, LoadSensitivity::kCritical);
-
- switch (element_type) {
- case kExternalUint8Array:
- return __ LoadElement(access_uint8, storage, index);
-
+Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type,
+ Node* value) {
+ switch (type) {
case kExternalInt8Array:
- return __ LoadElement(access_int8, storage, index);
+ case kExternalUint8Array:
+ case kExternalUint8ClampedArray:
+ return value;
- case kExternalUint16Array: // Fall through.
case kExternalInt16Array: {
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- // If we're doing an Int16 load, sign-extend the most significant byte
- // by loading it as an Int8 instead of Uint8.
- ElementAccess access_msb =
- element_type == kExternalInt16Array ? access_int8 : access_uint8;
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_msb, storage,
- __ Int32Add(index, __ Int32Constant(1)));
-
- // result = (b1 << 8) + b0
- Node* result = __ Int32Add(__ Word32Shl(b1, __ Int32Constant(8)), b0);
- __ Goto(&done, result);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian load.
- Node* b0 = __ LoadElement(access_msb, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
-
- // result = (b0 << 8) + b1;
- Node* result = __ Int32Add(__ Word32Shl(b0, __ Int32Constant(8)), b1);
- __ Goto(&done, result);
- }
-
- // We're done, return {result}.
- __ Bind(&done);
- return done.PhiAt(0);
+ Node* result = __ Word32ReverseBytes(value);
+ result = __ Word32Sar(result, __ Int32Constant(16));
+ return result;
}
- case kExternalUint32Array: // Fall through.
- case kExternalInt32Array: // Fall through.
- case kExternalFloat32Array: {
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
- Node* b2 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(2)));
- Node* b3 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(3)));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- // result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- Node* result =
- __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
- __ Word32Shl(b2, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
- __ Goto(&done, result);
- }
+ case kExternalUint16Array: {
+ Node* result = __ Word32ReverseBytes(value);
+ result = __ Word32Shr(result, __ Int32Constant(16));
+ return result;
+ }
- __ Bind(&big_endian);
- {
- // Big-endian load.
- // result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- Node* result =
- __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
- __ Word32Shl(b1, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
- __ Goto(&done, result);
- }
+ case kExternalInt32Array: // Fall through.
+ case kExternalUint32Array:
+ return __ Word32ReverseBytes(value);
- // We're done, return {result}.
- __ Bind(&done);
- if (element_type == kExternalFloat32Array) {
- return __ BitcastInt32ToFloat32(done.PhiAt(0));
- } else {
- return done.PhiAt(0);
- }
+ case kExternalFloat32Array: {
+ Node* result = __ BitcastFloat32ToInt32(value);
+ result = __ Word32ReverseBytes(result);
+ result = __ BitcastInt32ToFloat32(result);
+ return result;
}
case kExternalFloat64Array: {
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
- Node* b2 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(2)));
- Node* b3 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(3)));
- Node* b4 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(4)));
- Node* b5 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(5)));
- Node* b6 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(6)));
- Node* b7 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(7)));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32,
- MachineRepresentation::kWord32);
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- // low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- // high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
- Node* low_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
- __ Word32Shl(b2, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
- Node* high_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b7, __ Int32Constant(24)),
- __ Word32Shl(b6, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b5, __ Int32Constant(8)), b4));
- __ Goto(&done, low_word, high_word);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian load.
- // high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- // low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
- Node* high_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
- __ Word32Shl(b1, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
- Node* low_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b4, __ Int32Constant(24)),
- __ Word32Shl(b5, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b6, __ Int32Constant(8)), b7));
- __ Goto(&done, low_word, high_word);
+ if (machine()->Is64()) {
+ Node* result = __ BitcastFloat64ToInt64(value);
+ result = __ Word64ReverseBytes(result);
+ result = __ BitcastInt64ToFloat64(result);
+ return result;
+ } else {
+ Node* lo = __ Word32ReverseBytes(__ Float64ExtractLowWord32(value));
+ Node* hi = __ Word32ReverseBytes(__ Float64ExtractHighWord32(value));
+ Node* result = __ Float64Constant(0.0);
+ result = __ Float64InsertLowWord32(result, hi);
+ result = __ Float64InsertHighWord32(result, lo);
+ return result;
}
-
- // We're done, store the low and high words into a float64.
- __ Bind(&done);
- Node* result = __ Float64Constant(0.0);
- result = __ Float64InsertLowWord32(result, done.PhiAt(0));
- result = __ Float64InsertHighWord32(result, done.PhiAt(1));
- return result;
}
- default:
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
UNREACHABLE();
}
}
-void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
+Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* storage = node->InputAt(1);
Node* index = node->InputAt(2);
- Node* value = node->InputAt(3);
- Node* is_little_endian = node->InputAt(4);
+ Node* is_little_endian = node->InputAt(3);
+
+ // On 64-bit platforms, we need to feed a Word64 index to the Load and
+ // Store operators.
+ if (machine()->Is64()) {
+ index = __ ChangeUint32ToUint64(index);
+ }
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- ElementAccess access =
- AccessBuilder::ForTypedArrayElement(kExternalUint8Array, true);
+ MachineType const machine_type =
+ AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- switch (element_type) {
- case kExternalUint8Array: // Fall through.
- case kExternalInt8Array: {
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- __ StoreElement(access, storage, index, b0);
- break;
- }
- case kExternalUint16Array: // Fall through.
- case kExternalInt16Array: {
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
+ Node* value = __ LoadUnaligned(machine_type, storage, index);
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(machine_type.representation());
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ Goto(&done);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b0);
- __ Goto(&done);
- }
-
- __ Bind(&done);
- break;
- }
-
- case kExternalUint32Array: // Fall through.
- case kExternalInt32Array: // Fall through.
- case kExternalFloat32Array: {
- if (element_type == kExternalFloat32Array) {
- value = __ BitcastFloat32ToInt32(value);
- }
+ __ GotoIfNot(is_little_endian, &big_endian);
+ { // Little-endian load.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, value);
+#else
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b2 = __ Word32And(__ Word32Shr(value, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b3 = __ Word32Shr(value, __ Int32Constant(24));
+ __ Bind(&big_endian);
+ { // Big-endian load.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#else
+ __ Goto(&done, value);
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
+ // We're done, return {result}.
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b3);
- __ Goto(&done);
- }
+void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
+ ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* storage = node->InputAt(1);
+ Node* index = node->InputAt(2);
+ Node* value = node->InputAt(3);
+ Node* is_little_endian = node->InputAt(4);
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b0);
- __ Goto(&done);
- }
+ // On 64-bit platforms, we need to feed a Word64 index to the Load and
+ // Store operators.
+ if (machine()->Is64()) {
+ index = __ ChangeUint32ToUint64(index);
+ }
- __ Bind(&done);
- break;
- }
+ // We need to keep the {buffer} alive so that the GC will not release the
+ // ArrayBuffer (if there's any) as long as we are still operating on it.
+ __ Retain(buffer);
- case kExternalFloat64Array: {
- Node* low_word = __ Float64ExtractLowWord32(value);
- Node* high_word = __ Float64ExtractHighWord32(value);
-
- Node* b0 = __ Word32And(low_word, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b2 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b3 = __ Word32Shr(low_word, __ Int32Constant(24));
-
- Node* b4 = __ Word32And(high_word, __ Int32Constant(0xFF));
- Node* b5 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b6 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b7 = __ Word32Shr(high_word, __ Int32Constant(24));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(4)), b4);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(5)), b5);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(6)), b6);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(7)), b7);
- __ Goto(&done);
- }
+ MachineType const machine_type =
+ AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b7);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b6);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b5);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b4);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(4)), b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(5)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(6)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(7)), b0);
- __ Goto(&done);
- }
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(machine_type.representation());
- __ Bind(&done);
- break;
- }
+ __ GotoIfNot(is_little_endian, &big_endian);
+ { // Little-endian store.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, value);
+#else
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- default:
- UNREACHABLE();
+ __ Bind(&big_endian);
+ { // Big-endian store.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#else
+ __ Goto(&done, value);
+#endif // V8_TARGET_LITTLE_ENDIAN
}
+
+ __ Bind(&done);
+ __ StoreUnaligned(machine_type.representation(), storage, index,
+ done.PhiAt(0));
}
+
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
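
The rewritten DataView lowering above replaces the per-byte load/shift sequences with a single (possibly unaligned) load of the whole element, followed by a byte swap only when the DataView's requested endianness differs from the target's. A minimal standalone C++ sketch of the same idea, assuming a little-endian host and plain byte storage (the helper names here are illustrative, not V8 APIs):

  #include <cstdint>
  #include <cstring>

  // Byte-reverse a 64-bit pattern; on 32-bit targets the patch achieves the
  // same effect by reversing each 32-bit half and swapping the halves.
  static uint64_t ReverseBytes64(uint64_t v) {
    uint64_t r = 0;
    for (int i = 0; i < 8; i++) {
      r = (r << 8) | ((v >> (8 * i)) & 0xFF);
    }
    return r;
  }

  // Load a float64 DataView element from byte storage (little-endian host
  // assumed; 'requested_little_endian' is the DataView caller's argument).
  double LoadFloat64Element(const uint8_t* storage, size_t index,
                            bool requested_little_endian) {
    uint64_t bits;
    std::memcpy(&bits, storage + index, sizeof(bits));  // unaligned-safe load
    if (!requested_little_endian) bits = ReverseBytes64(bits);
    double result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
  }
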
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 47e0a249cf..aa174ed45e 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -178,8 +178,10 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
const VectorSlotPair& feedback,
Node* value,
Node* frame_state);
+ Node* BuildReverseBytes(ExternalArrayType type, Node* value);
Node* BuildFloat64RoundDown(Node* value);
Node* BuildFloat64RoundTruncate(Node* input);
+ Node* BuildUint32Mod(Node* lhs, Node* rhs);
Node* ComputeIntegerHash(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 1642d85c23..1434a4b98a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -99,7 +99,8 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
}
switch (node->opcode()) {
- case IrOpcode::kAllocate: {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kTypeGuard: {
const VirtualObject* vobject = analysis_result().GetVirtualObject(node);
if (vobject && !vobject->HasEscaped()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 9780d227fd..496f322106 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -155,6 +155,28 @@ Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
current_effect_, current_control_);
}
+Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
+ Node* offset, Node* value) {
+ Operator const* const op =
+ (rep == MachineRepresentation::kWord8 ||
+ machine()->UnalignedStoreSupported(rep))
+ ? machine()->Store(StoreRepresentation(rep, kNoWriteBarrier))
+ : machine()->UnalignedStore(rep);
+ return current_effect_ = graph()->NewNode(op, object, offset, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadUnaligned(MachineType rep, Node* object,
+ Node* offset) {
+ Operator const* const op =
+ (rep.representation() == MachineRepresentation::kWord8 ||
+ machine()->UnalignedLoadSupported(rep.representation()))
+ ? machine()->Load(rep)
+ : machine()->UnalignedLoad(rep);
+ return current_effect_ = graph()->NewNode(op, object, offset, current_effect_,
+ current_control_);
+}
+
Node* GraphAssembler::Retain(Node* buffer) {
return current_effect_ =
graph()->NewNode(common()->Retain(), buffer, current_effect_);
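
GraphAssembler::LoadUnaligned and StoreUnaligned above pick the ordinary Load/Store operator when the target supports unaligned access for the representation (byte-wide accesses always qualify) and fall back to the dedicated Unaligned* operators otherwise. A hedged C++ sketch of that capability-based dispatch; 'supports_unaligned' stands in for the machine capability query and both paths assume little-endian byte order:

  #include <cstdint>
  #include <cstring>

  // Use the plain load when unaligned access is supported (or the pointer
  // happens to be aligned); otherwise assemble the value byte by byte, which
  // is roughly what an unaligned-load fallback lowers to.
  uint32_t LoadWord32(const uint8_t* p, bool supports_unaligned) {
    if (supports_unaligned || (reinterpret_cast<uintptr_t>(p) & 3) == 0) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // typically a single load where legal
      return v;
    }
    return static_cast<uint32_t>(p[0]) | (static_cast<uint32_t>(p[1]) << 8) |
           (static_cast<uint32_t>(p[2]) << 16) |
           (static_cast<uint32_t>(p[3]) << 24);
  }
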
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index f9b45a2007..79eb493608 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -31,8 +31,12 @@ namespace compiler {
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
V(BitcastFloat32ToInt32) \
- V(Float64Abs)
+ V(BitcastFloat64ToInt64) \
+ V(Float64Abs) \
+ V(Word32ReverseBytes) \
+ V(Word64ReverseBytes)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
@@ -215,6 +219,10 @@ class GraphAssembler {
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
Node* Load(MachineType rep, Node* object, Node* offset);
+ Node* StoreUnaligned(MachineRepresentation rep, Node* object, Node* offset,
+ Node* value);
+ Node* LoadUnaligned(MachineType rep, Node* object, Node* offset);
+
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 4b95169215..5573c346ee 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -11,6 +11,7 @@
#include <memory>
#include "src/globals.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 9aef138811..9d54eaeb90 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -16,6 +16,8 @@
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -166,6 +168,22 @@ class IA32OperandConverter : public InstructionOperandConverter {
Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
+
+ Operand NextMemoryOperand(size_t offset = 0) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ Register base = InputRegister(NextOffset(&offset));
+ const int32_t disp = 4;
+ if (mode == kMode_MR1) {
+ Register index = InputRegister(NextOffset(&offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, kMode_MR1);
+ return Operand(base, index, scale, disp);
+ } else if (mode == kMode_MRI) {
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(&offset)));
+ return Operand(base, ctant.ToInt32() + disp, ctant.rmode());
+ } else {
+ UNREACHABLE();
+ }
+ }
};
@@ -409,6 +427,23 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2)); \
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2)); \
+ __ push(i.InputRegister(0)); \
+ __ push(i.InputRegister(1)); \
+ __ instr1(i.InputRegister(0), i.OutputRegister(0)); \
+ __ instr2(i.InputRegister(1), i.OutputRegister(1)); \
+ __ lock(); \
+ __ cmpxchg8b(i.MemoryOperand(2)); \
+ __ pop(i.InputRegister(1)); \
+ __ pop(i.InputRegister(0)); \
+ __ j(not_equal, &binop); \
+ } while (false);
+
#define ASSEMBLE_MOVX(mov_instr) \
do { \
if (instr->addressing_mode() != kMode_None) { \
@@ -1152,6 +1187,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kIA32Bswap:
+ __ bswap(i.OutputRegister());
+ break;
case kArchWordPoisonOnSpeculation:
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ and_(i.InputRegister(0), kSpeculationPoisonRegister);
@@ -3594,7 +3632,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ VerifyRootRegister();
+ __ cmp(esp, tasm()->StaticVariable(stack_limit));
+ break;
+ }
+ case kIA32Word32AtomicPairLoad: {
+ XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+ __ movq(tmp, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), tmp, 0);
+ __ Pextrd(i.OutputRegister(1), tmp, 1);
+ break;
+ }
+ case kIA32Word32AtomicPairStore: {
+ __ mov(i.TempRegister(0), i.MemoryOperand(2));
+ __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
break;
}
case kWord32AtomicExchangeInt8: {
@@ -3621,6 +3674,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
+  // For the narrow Word64 operations below, i.OutputRegister(1) contains
+  // the high-order 32 bits of the 64-bit result. As the data exchange
+  // fits in one register, i.OutputRegister(1) needs to be cleared for
+  // the correct return value to be propagated back.
+ case kIA32Word64AtomicNarrowExchangeUint8: {
+ __ xchg_b(i.OutputRegister(0), i.MemoryOperand(1));
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowExchangeUint16: {
+ __ xchg_w(i.OutputRegister(0), i.MemoryOperand(1));
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowExchangeUint32: {
+ __ xchg(i.OutputRegister(0), i.MemoryOperand(1));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word32AtomicPairExchange: {
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2));
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
+ break;
+ }
case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
@@ -3650,30 +3731,72 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movsx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movzx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Int16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movsx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movzx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Word32: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
- break; \
+ case kIA32Word64AtomicNarrowCompareExchangeUint8: {
+ __ lock();
+ __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowCompareExchangeUint16: {
+ __ lock();
+ __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowCompareExchangeUint32: {
+ __ lock();
+ __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word32AtomicPairCompareExchange: {
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(4));
+ break;
+ }
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movsx_b(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0)); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Int16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movsx_w(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0)); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Word32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ break; \
}
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -3681,6 +3804,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC_BINOP_CASE(op, instr1, instr2) \
+ case kIA32Word32AtomicPair##op: { \
+ ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ break; \
+ }
+ ATOMIC_BINOP_CASE(Add, add, adc)
+ ATOMIC_BINOP_CASE(And, and_, and_)
+ ATOMIC_BINOP_CASE(Or, or_, or_)
+ ATOMIC_BINOP_CASE(Xor, xor_, xor_)
+#undef ATOMIC_BINOP_CASE
+ case kIA32Word32AtomicPairSub: {
+ Label binop;
+ __ bind(&binop);
+ // Move memory operand into edx:eax
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2));
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ // Save input registers temporarily on the stack.
+ __ push(i.InputRegister(0));
+ __ push(i.InputRegister(1));
+ // Negate input in place
+ __ neg(i.InputRegister(0));
+ __ adc(i.InputRegister(1), 0);
+ __ neg(i.InputRegister(1));
+    // Add the memory operand to the negated input.
+ __ add(i.InputRegister(0), i.OutputRegister(0));
+ __ adc(i.InputRegister(1), i.OutputRegister(1));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
+ // Restore input registers
+ __ pop(i.InputRegister(1));
+ __ pop(i.InputRegister(0));
+ __ j(not_equal, &binop);
+ break;
+ }
case kWord32AtomicLoadInt8:
case kWord32AtomicLoadUint8:
case kWord32AtomicLoadInt16:
@@ -4450,6 +4607,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_I64ATOMIC_BINOP
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
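
The ASSEMBLE_I64ATOMIC_BINOP macro and the kIA32Word32AtomicPairSub case above implement 64-bit read-modify-write atomics on IA32 as a lock cmpxchg8b retry loop: read the current value into edx:eax, build the desired value in ecx:ebx, and retry if the memory changed in between. The same shape in portable C++, shown here as a sketch rather than V8 code:

  #include <atomic>
  #include <cstdint>

  // 64-bit atomic add expressed as a compare-exchange retry loop, mirroring
  // the lock cmpxchg8b sequence emitted by the IA32 code generator.
  uint64_t AtomicPairAdd(std::atomic<uint64_t>* cell, uint64_t operand) {
    uint64_t expected = cell->load(std::memory_order_relaxed);
    uint64_t desired;
    do {
      // The IA32 lowering computes this with add/adc across the two halves.
      desired = expected + operand;
    } while (!cell->compare_exchange_weak(expected, desired,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed));
    return expected;  // old value, analogous to the edx:eax output pair
  }
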
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 8ffc9c3819..97f3763cf5 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -11,346 +11,377 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32PushFloat32) \
- V(IA32PushFloat64) \
- V(IA32PushSimd128) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32StackCheck) \
- V(SSEF32x4Splat) \
- V(AVXF32x4Splat) \
- V(SSEF32x4ExtractLane) \
- V(AVXF32x4ExtractLane) \
- V(SSEF32x4ReplaceLane) \
- V(AVXF32x4ReplaceLane) \
- V(IA32F32x4SConvertI32x4) \
- V(SSEF32x4UConvertI32x4) \
- V(AVXF32x4UConvertI32x4) \
- V(SSEF32x4Abs) \
- V(AVXF32x4Abs) \
- V(SSEF32x4Neg) \
- V(AVXF32x4Neg) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(SSEF32x4Add) \
- V(AVXF32x4Add) \
- V(SSEF32x4AddHoriz) \
- V(AVXF32x4AddHoriz) \
- V(SSEF32x4Sub) \
- V(AVXF32x4Sub) \
- V(SSEF32x4Mul) \
- V(AVXF32x4Mul) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(SSEI32x4ReplaceLane) \
- V(AVXI32x4ReplaceLane) \
- V(SSEI32x4SConvertF32x4) \
- V(AVXI32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(SSEI32x4Shl) \
- V(AVXI32x4Shl) \
- V(SSEI32x4ShrS) \
- V(AVXI32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4AddHoriz) \
- V(AVXI32x4AddHoriz) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(SSEI32x4ShrU) \
- V(AVXI32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLane) \
- V(SSEI16x8ReplaceLane) \
- V(AVXI16x8ReplaceLane) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(SSEI16x8Shl) \
- V(AVXI16x8Shl) \
- V(SSEI16x8ShrS) \
- V(AVXI16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSaturateS) \
- V(AVXI16x8AddSaturateS) \
- V(SSEI16x8AddHoriz) \
- V(AVXI16x8AddHoriz) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSaturateS) \
- V(AVXI16x8SubSaturateS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(SSEI16x8ShrU) \
- V(AVXI16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSaturateU) \
- V(AVXI16x8AddSaturateU) \
- V(SSEI16x8SubSaturateU) \
- V(AVXI16x8SubSaturateU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLane) \
- V(SSEI8x16ReplaceLane) \
- V(AVXI8x16ReplaceLane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(SSEI8x16Shl) \
- V(AVXI8x16Shl) \
- V(IA32I8x16ShrS) \
- V(SSEI8x16Add) \
- V(AVXI8x16Add) \
- V(SSEI8x16AddSaturateS) \
- V(AVXI8x16AddSaturateS) \
- V(SSEI8x16Sub) \
- V(AVXI8x16Sub) \
- V(SSEI8x16SubSaturateS) \
- V(AVXI8x16SubSaturateS) \
- V(SSEI8x16Mul) \
- V(AVXI8x16Mul) \
- V(SSEI8x16MinS) \
- V(AVXI8x16MinS) \
- V(SSEI8x16MaxS) \
- V(AVXI8x16MaxS) \
- V(SSEI8x16Eq) \
- V(AVXI8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(SSEI8x16GtS) \
- V(AVXI8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSaturateU) \
- V(AVXI8x16AddSaturateU) \
- V(SSEI8x16SubSaturateU) \
- V(AVXI8x16SubSaturateU) \
- V(IA32I8x16ShrU) \
- V(SSEI8x16MinU) \
- V(AVXI8x16MinU) \
- V(SSEI8x16MaxU) \
- V(AVXI8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32S128Zero) \
- V(SSES128Not) \
- V(AVXS128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
- V(SSES128Select) \
- V(AVXS128Select) \
- V(IA32S8x16Shuffle) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S1x4AnyTrue) \
- V(IA32S1x4AllTrue) \
- V(IA32S1x8AnyTrue) \
- V(IA32S1x8AllTrue) \
- V(IA32S1x16AnyTrue) \
- V(IA32S1x16AllTrue)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(SSEUint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(IA32F32x4SConvertI32x4) \
+ V(SSEF32x4UConvertI32x4) \
+ V(AVXF32x4UConvertI32x4) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4AddHoriz) \
+ V(AVXF32x4AddHoriz) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(SSEI32x4ReplaceLane) \
+ V(AVXI32x4ReplaceLane) \
+ V(SSEI32x4SConvertF32x4) \
+ V(AVXI32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(SSEI32x4Shl) \
+ V(AVXI32x4Shl) \
+ V(SSEI32x4ShrS) \
+ V(AVXI32x4ShrS) \
+ V(SSEI32x4Add) \
+ V(AVXI32x4Add) \
+ V(SSEI32x4AddHoriz) \
+ V(AVXI32x4AddHoriz) \
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Sub) \
+ V(SSEI32x4Mul) \
+ V(AVXI32x4Mul) \
+ V(SSEI32x4MinS) \
+ V(AVXI32x4MinS) \
+ V(SSEI32x4MaxS) \
+ V(AVXI32x4MaxS) \
+ V(SSEI32x4Eq) \
+ V(AVXI32x4Eq) \
+ V(SSEI32x4Ne) \
+ V(AVXI32x4Ne) \
+ V(SSEI32x4GtS) \
+ V(AVXI32x4GtS) \
+ V(SSEI32x4GeS) \
+ V(AVXI32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(SSEI32x4ShrU) \
+ V(AVXI32x4ShrU) \
+ V(SSEI32x4MinU) \
+ V(AVXI32x4MinU) \
+ V(SSEI32x4MaxU) \
+ V(AVXI32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLane) \
+ V(SSEI16x8ReplaceLane) \
+ V(AVXI16x8ReplaceLane) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(SSEI16x8Shl) \
+ V(AVXI16x8Shl) \
+ V(SSEI16x8ShrS) \
+ V(AVXI16x8ShrS) \
+ V(SSEI16x8SConvertI32x4) \
+ V(AVXI16x8SConvertI32x4) \
+ V(SSEI16x8Add) \
+ V(AVXI16x8Add) \
+ V(SSEI16x8AddSaturateS) \
+ V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddHoriz) \
+ V(AVXI16x8AddHoriz) \
+ V(SSEI16x8Sub) \
+ V(AVXI16x8Sub) \
+ V(SSEI16x8SubSaturateS) \
+ V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8Mul) \
+ V(AVXI16x8Mul) \
+ V(SSEI16x8MinS) \
+ V(AVXI16x8MinS) \
+ V(SSEI16x8MaxS) \
+ V(AVXI16x8MaxS) \
+ V(SSEI16x8Eq) \
+ V(AVXI16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(SSEI16x8GtS) \
+ V(AVXI16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(SSEI16x8ShrU) \
+ V(AVXI16x8ShrU) \
+ V(SSEI16x8UConvertI32x4) \
+ V(AVXI16x8UConvertI32x4) \
+ V(SSEI16x8AddSaturateU) \
+ V(AVXI16x8AddSaturateU) \
+ V(SSEI16x8SubSaturateU) \
+ V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8MinU) \
+ V(AVXI16x8MinU) \
+ V(SSEI16x8MaxU) \
+ V(AVXI16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLane) \
+ V(SSEI8x16ReplaceLane) \
+ V(AVXI8x16ReplaceLane) \
+ V(SSEI8x16SConvertI16x8) \
+ V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(SSEI8x16Shl) \
+ V(AVXI8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(SSEI8x16Add) \
+ V(AVXI8x16Add) \
+ V(SSEI8x16AddSaturateS) \
+ V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16Sub) \
+ V(AVXI8x16Sub) \
+ V(SSEI8x16SubSaturateS) \
+ V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16Mul) \
+ V(AVXI8x16Mul) \
+ V(SSEI8x16MinS) \
+ V(AVXI8x16MinS) \
+ V(SSEI8x16MaxS) \
+ V(AVXI8x16MaxS) \
+ V(SSEI8x16Eq) \
+ V(AVXI8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(SSEI8x16GtS) \
+ V(AVXI8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(SSEI8x16UConvertI16x8) \
+ V(AVXI8x16UConvertI16x8) \
+ V(SSEI8x16AddSaturateU) \
+ V(AVXI8x16AddSaturateU) \
+ V(SSEI8x16SubSaturateU) \
+ V(AVXI8x16SubSaturateU) \
+ V(IA32I8x16ShrU) \
+ V(SSEI8x16MinU) \
+ V(AVXI8x16MinU) \
+ V(SSEI8x16MaxU) \
+ V(AVXI8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor) \
+ V(SSES128Select) \
+ V(AVXS128Select) \
+ V(IA32S8x16Shuffle) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S1x4AnyTrue) \
+ V(IA32S1x4AllTrue) \
+ V(IA32S1x8AnyTrue) \
+ V(IA32S1x8AllTrue) \
+ V(IA32S1x16AnyTrue) \
+ V(IA32S1x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32AtomicPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
+ V(IA32Word32AtomicPairCompareExchange) \
+ V(IA32Word64AtomicNarrowAddUint8) \
+ V(IA32Word64AtomicNarrowAddUint16) \
+ V(IA32Word64AtomicNarrowAddUint32) \
+ V(IA32Word64AtomicNarrowSubUint8) \
+ V(IA32Word64AtomicNarrowSubUint16) \
+ V(IA32Word64AtomicNarrowSubUint32) \
+ V(IA32Word64AtomicNarrowAndUint8) \
+ V(IA32Word64AtomicNarrowAndUint16) \
+ V(IA32Word64AtomicNarrowAndUint32) \
+ V(IA32Word64AtomicNarrowOrUint8) \
+ V(IA32Word64AtomicNarrowOrUint16) \
+ V(IA32Word64AtomicNarrowOrUint32) \
+ V(IA32Word64AtomicNarrowXorUint8) \
+ V(IA32Word64AtomicNarrowXorUint16) \
+ V(IA32Word64AtomicNarrowXorUint32) \
+ V(IA32Word64AtomicNarrowExchangeUint8) \
+ V(IA32Word64AtomicNarrowExchangeUint16) \
+ V(IA32Word64AtomicNarrowExchangeUint32) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint8) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint16) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint32)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 82d6fb88a3..07d42bc614 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -43,6 +43,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Lzcnt:
case kIA32Tzcnt:
case kIA32Popcnt:
+ case kIA32Bswap:
case kIA32Lea:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
@@ -368,6 +369,40 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
+ case kIA32Word32AtomicPairLoad:
+ return kIsLoadOperation;
+
+ case kIA32Word32AtomicPairStore:
+ case kIA32Word32AtomicPairAdd:
+ case kIA32Word32AtomicPairSub:
+ case kIA32Word32AtomicPairAnd:
+ case kIA32Word32AtomicPairOr:
+ case kIA32Word32AtomicPairXor:
+ case kIA32Word32AtomicPairExchange:
+ case kIA32Word32AtomicPairCompareExchange:
+ case kIA32Word64AtomicNarrowAddUint8:
+ case kIA32Word64AtomicNarrowAddUint16:
+ case kIA32Word64AtomicNarrowAddUint32:
+ case kIA32Word64AtomicNarrowSubUint8:
+ case kIA32Word64AtomicNarrowSubUint16:
+ case kIA32Word64AtomicNarrowSubUint32:
+ case kIA32Word64AtomicNarrowAndUint8:
+ case kIA32Word64AtomicNarrowAndUint16:
+ case kIA32Word64AtomicNarrowAndUint32:
+ case kIA32Word64AtomicNarrowOrUint8:
+ case kIA32Word64AtomicNarrowOrUint16:
+ case kIA32Word64AtomicNarrowOrUint32:
+ case kIA32Word64AtomicNarrowXorUint8:
+ case kIA32Word64AtomicNarrowXorUint16:
+ case kIA32Word64AtomicNarrowXorUint32:
+ case kIA32Word64AtomicNarrowExchangeUint8:
+ case kIA32Word64AtomicNarrowExchangeUint16:
+ case kIA32Word64AtomicNarrowExchangeUint32:
+ case kIA32Word64AtomicNarrowCompareExchangeUint8:
+ case kIA32Word64AtomicNarrowCompareExchangeUint16:
+ case kIA32Word64AtomicNarrowCompareExchangeUint32:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 4144254285..ce2f14e97f 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -164,6 +164,17 @@ class IA32OperandGenerator final : public OperandGenerator {
}
}
+ InstructionOperand GetEffectiveIndexOperand(Node* index,
+ AddressingMode* mode) {
+ if (CanBeImmediate(index)) {
+ *mode = kMode_MRI;
+ return UseImmediate(index);
+ } else {
+ *mode = kMode_MR1;
+ return UseUniqueRegister(index);
+ }
+ }
+
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
@@ -331,17 +342,10 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ g.UseUniqueRegister(value)};
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -362,7 +366,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
switch (rep) {
@@ -823,7 +827,10 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -1293,30 +1300,86 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- if (rep == MachineRepresentation::kWord8) {
- inputs[input_count++] = g.UseFixed(value, edx);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand outputs[1];
- if (rep == MachineRepresentation::kWord8) {
- // Using DefineSameAsFirst requires the register to be unallocated.
- outputs[0] = g.DefineAsFixed(node, edx);
- } else {
- outputs[0] = g.DefineSameAsFirst(node);
- }
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
+}
+
+void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ AddressingMode addressing_mode;
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
+ InstructionOperand temp[] = {(rep == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(node)
+ : g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temp), temp);
+}
+
+void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+  // For Word64 operations, the value input is split into a high node and a
+  // low node in the int64-lowering phase.
+ Node* value_high = node->InputAt(3);
+
+ // Wasm lives in 32-bit address space, so we do not need to worry about
+ // base/index lowering. This will need to be fixed for Wasm64.
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineType type) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // Wasm lives in 32-bit address space, so we do not need to worry about
+ // base/index lowering. This will need to be fixed for Wasm64.
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionOperand temp[] = {(type == MachineType::Uint8())
+ ? g.UseByteRegister(node)
+ : g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, input_count, inputs);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temp), temp);
}
} // namespace
@@ -1608,7 +1671,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1634,7 +1697,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -1650,38 +1713,23 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand outputs[1];
AddressingMode addressing_mode;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseFixed(old_value, eax);
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- inputs[input_count++] = g.UseByteRegister(new_value);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(new_value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- outputs[0] = g.DefineAsFixed(node, eax);
+ InstructionOperand new_val_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(new_value)
+ : g.UseUniqueRegister(new_value);
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, eax), new_val_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs);
+ Emit(code, 1, outputs, arraysize(inputs), inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -1697,28 +1745,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
return;
}
- InstructionOperand outputs[1];
- AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(value);
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- outputs[0] = g.DefineAsFixed(node, eax);
- InstructionOperand temp[1];
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- temp[0] = g.UseByteRegister(node);
- } else {
- temp[0] = g.TempRegister();
- }
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ VisitAtomicBinOp(this, node, opcode, type.representation());
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -1735,6 +1762,193 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ IA32OperandGenerator g(this);
+ AddressingMode mode;
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &mode)};
+ InstructionOperand temps[] = {g.TempDoubleRegister()};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionCode code =
+ kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+  // Allocate temp registers here because the store is performed using an
+  // atomic exchange, whose output ends up in edx:eax and therefore has to be
+  // saved and restored around the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ InstructionCode code =
+ kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAdd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairSub);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAnd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairOr);
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairXor);
+}
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairExchange);
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* index = node->InputAt(1);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ // High, Low values of old value
+ g.UseFixed(node->InputAt(2), eax), g.UseFixed(node->InputAt(3), edx),
+ // High, Low values of new value
+ g.UseFixed(node->InputAt(4), ebx), g.UseFixed(node->InputAt(5), ecx),
+ // InputAt(0) => base
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionCode code = kIA32Word32AtomicPairCompareExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
+ ArchOpcode uint8_op,
+ ArchOpcode uint16_op,
+ ArchOpcode uint32_op) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitNarrowAtomicBinOp(this, node, opcode, type);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
+ VisitWord64AtomicNarrowBinop(node, kIA32Word64AtomicNarrow##op##Uint8, \
+ kIA32Word64AtomicNarrow##op##Uint16, \
+ kIA32Word64AtomicNarrow##op##Uint32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint32;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint16;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint8;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[2];
+ if (type.representation() == MachineRepresentation::kWord8) {
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ outputs[0] = g.DefineAsFixed(NodeProperties::FindProjection(node, 0), edx);
+ } else {
+ outputs[0] = g.DefineSameAsFirst(NodeProperties::FindProjection(node, 0));
+ }
+ outputs[1] = g.DefineAsRegister(NodeProperties::FindProjection(node, 1));
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint32;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint16;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint8;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ AddressingMode addressing_mode;
+ InstructionOperand new_value_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(new_value)
+ : g.UseUniqueRegister(new_value);
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, eax), new_value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
#define SIMD_INT_TYPES(V) \
V(I32x4) \
V(I16x8) \
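
The pair selectors above bind their 64-bit results to two Word32 projections, fixed to eax (projection 0, the low word) and edx (projection 1, the high word) per cmpxchg8b's edx:eax convention, because IA32 has no 64-bit general-purpose registers; the int64-lowering phase splits every Word64 value into such a low/high pair. A small C++ sketch of that value representation, for illustration only:

  #include <cstdint>
  #include <utility>

  // On a 32-bit target a 64-bit value travels as a (low, high) pair of
  // 32-bit words, the two projections bound to eax and edx above.
  std::pair<uint32_t, uint32_t> SplitWord64(uint64_t v) {
    return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
  }

  uint64_t CombineWord64(uint32_t low, uint32_t high) {
    return (static_cast<uint64_t>(high) << 32) | low;
  }
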
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index f1ca52b14d..d15a633257 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -587,7 +587,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
return entries;
} else {
- // Crankshaft counts duplicate objects for the running id, so we have
+ // Deoptimizer counts duplicate objects for the running id, so we have
// to push the input again.
deduplicator->InsertObject(input);
values->PushDuplicate(id);
@@ -1705,11 +1705,18 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32AtomicStore(node);
case IrOpcode::kWord64AtomicStore:
return VisitWord64AtomicStore(node);
-#define ATOMIC_CASE(name, rep) \
- case IrOpcode::k##rep##Atomic##name: { \
- MachineType type = AtomicOpRepresentationOf(node->op()); \
- MarkAsRepresentation(type.representation(), node); \
- return Visit##rep##Atomic##name(node); \
+ case IrOpcode::kWord32AtomicPairStore:
+ return VisitWord32AtomicPairStore(node);
+ case IrOpcode::kWord32AtomicPairLoad: {
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
+ return VisitWord32AtomicPairLoad(node);
+ }
+#define ATOMIC_CASE(name, rep) \
+ case IrOpcode::k##rep##Atomic##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ MarkAsRepresentation(type.representation(), node); \
+ return Visit##rep##Atomic##name(node); \
}
ATOMIC_CASE(Add, Word32)
ATOMIC_CASE(Add, Word64)
@@ -1726,6 +1733,35 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord32AtomicPair##name: { \
+ MarkAsWord32(node); \
+ MarkPairProjectionsAsWord32(node); \
+ return VisitWord32AtomicPair##name(node); \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+ ATOMIC_CASE(CompareExchange)
+#undef ATOMIC_CASE
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord64AtomicNarrow##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ MarkAsRepresentation(type.representation(), node); \
+ MarkPairProjectionsAsWord32(node); \
+ return VisitWord64AtomicNarrow##name(node); \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+ ATOMIC_CASE(CompareExchange)
+#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
@@ -2353,6 +2389,72 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
+
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 39d0c01ee9..435b7185a6 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -731,6 +731,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ArchOpcode uint16_op,
ArchOpcode uint32_op,
ArchOpcode uint64_op);
+ void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
+ ArchOpcode uint16_op, ArchOpcode uint32_op);
// ===========================================================================
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 803f3e0c1d..1991e309d3 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -811,7 +811,7 @@ class V8_EXPORT_PRIVATE Instruction final {
return &operands_[i];
}
- bool HasOutput() const { return OutputCount() == 1; }
+ bool HasOutput() const { return OutputCount() > 0; }
const InstructionOperand* Output() const { return OutputAt(0); }
InstructionOperand* Output() { return OutputAt(0); }
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 410e78a30d..8066ce5dca 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -117,6 +117,21 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
} // namespace
+void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
+ DCHECK_EQ(5, node->InputCount());
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(value));
+ node->InsertInput(zone(), 3, GetReplacementHigh(value));
+ NodeProperties::ChangeOp(node, op);
+ ReplaceNodeWithProjections(node);
+}
+
+void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
+ DefaultLowering(node, true);
+ NodeProperties::ChangeOp(node, op);
+ ReplaceNodeWithProjections(node);
+}
+
// static
int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
@@ -338,11 +353,7 @@ void Int64Lowering::LowerNode(Node* node) {
size_t return_arity = call_descriptor->ReturnCount();
if (return_arity == 1) {
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
} else {
ZoneVector<Node*> projections(return_arity, zone());
NodeProperties::CollectValueProjections(node, projections.data(),
@@ -405,11 +416,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kInt64Sub: {
@@ -425,11 +432,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairSub());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kInt64Mul: {
@@ -445,11 +448,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairMul());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Or: {
@@ -497,11 +496,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShl());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Shr: {
@@ -521,11 +516,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShr());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Sar: {
@@ -545,11 +536,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairSar());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Equal: {
@@ -855,9 +842,10 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
- ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
- GetReplacementHigh(input)),
- graph()->NewNode(machine()->Word32ReverseBytes().op(),
+ ReplaceNode(node,
+ graph()->NewNode(machine()->Word32ReverseBytes(),
+ GetReplacementHigh(input)),
+ graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementLow(input)));
break;
}
@@ -895,6 +883,68 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
+ case IrOpcode::kWord64AtomicLoad: {
+ DCHECK_EQ(4, node->InputCount());
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ ReplaceNodeWithProjections(node);
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
+ }
+ break;
+ }
+ case IrOpcode::kWord64AtomicStore: {
+ DCHECK_EQ(5, node->InputCount());
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kWord64) {
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(value));
+ node->InsertInput(zone(), 3, GetReplacementHigh(value));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ } else {
+ DefaultLowering(node, true);
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ }
+ break;
+ }
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord64Atomic##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ if (type == MachineType::Uint64()) { \
+ LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name()); \
+ } else { \
+ LowerWord64AtomicNarrowOp(node, \
+ machine()->Word64AtomicNarrow##name(type)); \
+ } \
+ break; \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+#undef ATOMIC_CASE
+ case IrOpcode::kWord64AtomicCompareExchange: {
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint64()) {
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ node->ReplaceInput(2, GetReplacementLow(old_value));
+ node->ReplaceInput(3, GetReplacementHigh(old_value));
+ node->InsertInput(zone(), 4, GetReplacementLow(new_value));
+ node->InsertInput(zone(), 5, GetReplacementHigh(new_value));
+ NodeProperties::ChangeOp(node,
+ machine()->Word32AtomicPairCompareExchange());
+ ReplaceNodeWithProjections(node);
+ } else {
+ LowerWord64AtomicNarrowOp(
+ node, machine()->Word64AtomicNarrowCompareExchange(type));
+ }
+ break;
+ }
default: { DefaultLowering(node); }
}
@@ -987,6 +1037,16 @@ void Int64Lowering::PreparePhiReplacement(Node* phi) {
value_count + 1, inputs_high, false));
}
}
+
+void Int64Lowering::ReplaceNodeWithProjections(Node* node) {
+ DCHECK(node != nullptr);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
+ ReplaceNode(node, low_node, high_node);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
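
The lowering above never materializes a 64-bit value on 32-bit targets: every Word64 atomic is rewritten to operate on a (low, high) pair of 32-bit words, and ReplaceNodeWithProjections hands the two halves to consumers as Projection(0) (low word) and Projection(1) (high word). Below is a minimal standalone sketch of that word-pair convention in plain C++; it is not part of this patch and the helper names are illustrative only.

    // Sketch only: the low/high word split that Int64Lowering's projections
    // express in the graph. Not V8 code.
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    // Split a 64-bit value into (low, high) 32-bit words, the same shape as
    // Projection(0) / Projection(1) on a lowered pair operation.
    std::pair<uint32_t, uint32_t> ToWordPair(uint64_t value) {
      return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32)};
    }

    // Recombine the two words, as a consumer of both projections would.
    uint64_t FromWordPair(uint32_t low, uint32_t high) {
      return (static_cast<uint64_t>(high) << 32) | low;
    }

    int main() {
      uint64_t input = 0x1122334455667788ULL;
      auto [low, high] = ToWordPair(input);
      std::printf("low=%08x high=%08x roundtrip=%016llx\n", low, high,
                  static_cast<unsigned long long>(FromWordPair(low, high)));
    }

Keeping both halves as separate graph values is what lets the narrow (Uint8/Uint16/Uint32) cases above reuse the 32-bit atomic machinery and simply pin the high projection to zero.
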
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 6109cd5847..ab403f904a 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool DefaultLowering(Node* node, bool low_word_only = false);
void LowerComparison(Node* node, const Operator* signed_op,
const Operator* unsigned_op);
+ void LowerWord64AtomicBinop(Node* node, const Operator* op);
+ void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
@@ -58,6 +60,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+ void ReplaceNodeWithProjections(Node* node);
struct NodeState {
Node* node;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 6b545c2853..a06f4490a6 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,7 +4,7 @@
#include "src/compiler/js-call-reducer.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
@@ -23,6 +23,8 @@
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -218,11 +220,11 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
// Turn the {node} into a {JSCreateArray} call.
DCHECK_LE(2u, p.arity());
- Handle<AllocationSite> site;
size_t const arity = p.arity() - 2;
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
- NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ NodeProperties::ChangeOp(
+ node, javascript()->CreateArray(arity, MaybeHandle<AllocationSite>()));
return Changed(node);
}
@@ -937,7 +939,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
Node* vtrue;
{
vtrue = etrue = if_true =
- graph()->NewNode(javascript()->HasProperty(), key, target, context,
+ graph()->NewNode(javascript()->HasProperty(), target, key, context,
frame_state, etrue, if_true);
}
@@ -976,7 +978,6 @@ bool CanInlineArrayIteratingBuiltin(Isolate* isolate,
isolate);
return receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
isolate->IsNoElementsProtectorIntact() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
@@ -1507,6 +1508,11 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -1531,11 +1537,10 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
- // This array should be HOLEY_SMI_ELEMENTS because of the non-zero length.
// Even though {JSCreateArray} is not marked as {kNoThrow}, we can elide the
// exceptional projections because it cannot throw with the given parameters.
Node* a = control = effect = graph()->NewNode(
- javascript()->CreateArray(1, Handle<AllocationSite>::null()),
+ javascript()->CreateArray(1, MaybeHandle<AllocationSite>()),
array_constructor, array_constructor, original_length, context,
outer_frame_state, effect, control);
@@ -1633,6 +1638,9 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
&check_fail, &control);
}
+ // The array {a} should be HOLEY_SMI_ELEMENTS because we'd only come into this
+ // loop if the input array length is non-zero, and "new Array({x > 0})" always
+ // produces a HOLEY array.
Handle<Map> double_map(Map::cast(native_context()->get(
Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))),
isolate());
@@ -1715,6 +1723,11 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -1979,11 +1992,6 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(pwong): Handle holey double elements kinds.
- if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
return NoChange();
@@ -2069,12 +2077,15 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
// Replace holes with undefined.
- if (IsHoleyElementsKind(kind)) {
- element = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant()),
- jsgraph()->UndefinedConstant(), element);
+ if (kind == HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(7409): avoid deopt if not all uses of value are truncated.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
+ element = effect =
+ graph()->NewNode(simplified()->CheckFloat64Hole(mode, p.feedback()),
+ element, effect, control);
+ } else if (IsHoleyElementsKind(kind)) {
+ element =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), element);
}
Node* if_found_return_value =
@@ -2310,6 +2321,11 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -2555,8 +2571,13 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
if (!NodeProperties::GetMapWitness(isolate(), node).ToHandle(&receiver_map))
return NoChange();
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return NoChange();
- if (!IsFastElementsKind(receiver_map->elements_kind())) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
+
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
Callable const callable =
search_variant == SearchVariant::kIndexOf
@@ -2651,6 +2672,11 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -3419,6 +3445,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceArrayPrototypePop(node);
case Builtins::kArrayPrototypeShift:
return ReduceArrayPrototypeShift(node);
+ case Builtins::kArrayPrototypeSlice:
+ return ReduceArrayPrototypeSlice(node);
case Builtins::kArrayPrototypeEntries:
return ReduceArrayIterator(node, IterationKind::kEntries);
case Builtins::kArrayPrototypeKeys:
@@ -3675,6 +3703,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDatePrototypeGetTime(node);
case Builtins::kDateNow:
return ReduceDateNow(node);
+ case Builtins::kNumberConstructor:
+ return ReduceNumberConstructor(node);
default:
break;
}
@@ -4287,7 +4317,7 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
namespace {
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+// TODO(turbofan): This was copied from old compiler, might be too restrictive.
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
@@ -4297,7 +4327,7 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
return descriptors->GetDetails(number).IsReadOnly();
}
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+// TODO(turbofan): This was copied from old compiler, might be too restrictive.
bool CanInlineArrayResizeOperation(Isolate* isolate, Handle<Map> receiver_map) {
if (!receiver_map->prototype()->IsJSArray()) return false;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
@@ -4739,6 +4769,85 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
return Replace(value);
}
+// ES6 section 22.1.3.23 Array.prototype.slice ( )
+Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ int arity = static_cast<int>(p.arity() - 2);
+ // Here we only optimize for cloning, that is when slice is called
+ // without arguments, or with a single argument that is the constant 0.
+ if (arity >= 2) return NoChange();
+ if (arity == 1) {
+ NumberMatcher m(NodeProperties::GetValueInput(node, 2));
+ if (!m.HasValue()) return NoChange();
+ if (m.Value() != 0) return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to determine the {receiver} map.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+
+ bool can_be_holey = false;
+ // Check that the maps are of JSArray (and more)
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
+
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) can_be_holey = true;
+ }
+
+ // Install code dependency on the array protector for holey arrays.
+ if (can_be_holey) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
+ // If we have unreliable maps, we need a map check.
+ // This is actually redundant due to how JSNativeContextSpecialization
+ // reduces the load of slice, but we do it here nevertheless for consistency
+ // and robustness.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCloneFastJSArray);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoThrow | Operator::kNoDeopt);
+
+ // Calls to Builtins::kCloneFastJSArray produce COW arrays
+ // if the original array is COW
+ Node* clone = effect = graph()->NewNode(
+ common()->Call(call_descriptor), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, effect, control);
+
+ ReplaceWithValue(node, clone, effect, control);
+ return Replace(clone);
+}
+
// ES6 section 22.1.2.2 Array.isArray ( arg )
Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
// We certainly know that undefined is not an array.
@@ -4864,17 +4973,13 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
}
- // Load the (current) {iterated_object} from the {iterator}; this might be
- // either undefined or the JSReceiver that was passed to the JSArrayIterator
- // creation.
+ // Load the (current) {iterated_object} from the {iterator}.
Node* iterated_object = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayIteratorIteratedObject()),
iterator, effect, control);
- // Ensure that the {iterated_object} map didn't change. This also rules
- // out the undefined that we put as a termination marker into the
- // iterator.[[IteratedObject]] field once we reach the end.
+ // Ensure that the {iterated_object} map didn't change.
effect = graph()->NewNode(
simplified()->CheckMaps(CheckMapsFlag::kNone, iterated_object_maps,
p.feedback()),
@@ -4919,6 +5024,16 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
Node* index = effect = graph()->NewNode(simplified()->LoadField(index_access),
iterator, effect, control);
+ // Load the elements of the {iterated_object}. While it feels
+ // counter-intuitive to place the elements pointer load before
+ // the condition below, as it might not be needed (if the {index}
+ // is out of bounds for the {iterated_object}), it's better this
+ // way as it allows the LoadElimination to eliminate redundant
+ // reloads of the elements pointer.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ iterated_object, effect, control);
+
// Load the length of the {iterated_object}. Due to the map checks we
// already know something about the length here, which we can leverage
// to generate Word32 operations below without additional checking.
@@ -4953,10 +5068,6 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
DCHECK(iteration_kind == IterationKind::kEntries ||
iteration_kind == IterationKind::kValues);
- Node* elements = etrue = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- iterated_object, etrue, if_true);
-
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Node* base_ptr = etrue = graph()->NewNode(
simplified()->LoadField(
@@ -4969,9 +5080,9 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
ExternalArrayType array_type = kExternalInt8Array;
switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- array_type = kExternal##Type##Array; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ array_type = kExternal##Type##Array; \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -5002,7 +5113,8 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// TODO(6587): avoid deopt if not all uses of value are truncated.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
value_true = etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode), value_true, etrue, if_true);
+ simplified()->CheckFloat64Hole(mode, p.feedback()), value_true,
+ etrue, if_true);
}
}
@@ -5032,10 +5144,22 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// iterator.[[NextIndex]] >= array.length, stop iterating.
done_false = jsgraph()->TrueConstant();
value_false = jsgraph()->UndefinedConstant();
- efalse =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSArrayIteratorIteratedObject()),
- iterator, value_false, efalse, if_false);
+
+ if (!IsFixedTypedArrayElementsKind(elements_kind)) {
+ // Mark the {iterator} as exhausted by setting the [[NextIndex]] to a
+ // value that will never pass the length check again (aka the maximum
+ // value possible for the specific iterated object). Note that this is
+ // different from what the specification says, which is changing the
+ // [[IteratedObject]] field to undefined, but that makes it difficult
+ // to eliminate the map checks and "length" accesses in for..of loops.
+ //
+ // This is not necessary for JSTypedArray's, since the length of those
+ // cannot change later and so if we were ever out of bounds for them
+ // we will stay out-of-bounds forever.
+ Node* end_index = jsgraph()->Constant(index_access.type.Max());
+ efalse = graph()->NewNode(simplified()->StoreField(index_access),
+ iterator, end_index, efalse, if_false);
+ }
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -6034,7 +6158,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
Node* const parameters[] = {jsgraph()->TheHoleConstant()};
int const num_parameters = static_cast<int>(arraysize(parameters));
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared, Builtins::kTypedArrayConstructorLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kGenericConstructorLazyDeoptContinuation,
target, context, parameters, num_parameters, frame_state,
ContinuationFrameStateMode::LAZY);
@@ -6082,7 +6206,7 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
simplified()->NumberSubtract(), receiver_elements_kind,
jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
do { \
Node* check = graph()->NewNode( \
simplified()->NumberEqual(), receiver_elements_kind, \
@@ -6629,11 +6753,12 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
}
namespace {
-int ExternalArrayElementSize(const ExternalArrayType element_type) {
+uint32_t ExternalArrayElementSize(const ExternalArrayType element_type) {
switch (element_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return size;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ DCHECK_LE(sizeof(ctype), 8); \
+ return sizeof(ctype);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
@@ -6644,14 +6769,15 @@ int ExternalArrayElementSize(const ExternalArrayType element_type) {
Reduction JSCallReducer::ReduceDataViewPrototypeGet(
Node* node, ExternalArrayType element_type) {
+ uint32_t const element_size = ExternalArrayElementSize(element_type);
+ CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
-
- CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -6664,16 +6790,68 @@ Reduction JSCallReducer::ReduceDataViewPrototypeGet(
// Only do stuff if the {receiver} is really a DataView.
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
- // Check that the {offset} is a positive Smi.
- offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- offset, effect, control);
+ // Check that the {offset} is within range for the {receiver}.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // We only deal with DataViews here whose [[ByteLength]] is at least
+ // {element_size} and less than 2^31-{element_size}.
+ Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
+ if (dataview->byte_length()->Number() < element_size ||
+ dataview->byte_length()->Number() - element_size > kMaxInt) {
+ return NoChange();
+ }
- Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), offset);
+ // The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
+ if (dataview->byte_offset()->Number() > kMaxInt) {
+ return NoChange();
+ }
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
- is_positive, effect, control);
+ // Check that the {offset} is within range of the {byte_length}.
+ Node* byte_length = jsgraph()->Constant(
+ dataview->byte_length()->Number() - (element_size - 1));
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ // Add the [[ByteOffset]] to compute the effective offset.
+ Node* byte_offset =
+ jsgraph()->Constant(dataview->byte_offset()->Number());
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ } else {
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
+ byte_length = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
+
+ // Check that the {offset} is within range of the {byte_length}.
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ if (element_size > 0) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ Node* end_offset =
+ graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size - 1));
+ effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ end_offset, byte_length, effect, control);
+ }
+
+ // The {receiver}s [[ByteOffset]] also needs to be a (positive) Smi.
+ Node* byte_offset = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, effect, control);
+ byte_offset = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
+
+ // Compute the buffer index at which we'll read.
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ }
// Coerce {is_little_endian} to boolean.
is_little_endian =
@@ -6684,139 +6862,52 @@ Reduction JSCallReducer::ReduceDataViewPrototypeGet(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- Node* branch_neutered = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_neutered, control);
-
- // Raise an error if it was neuteured.
- Node* if_true_neutered =
- graph()->NewNode(common()->IfTrue(), branch_neutered);
- Node* etrue_neutered = effect;
- {
- if_true_neutered = etrue_neutered = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kDetachedOperation),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
- context, frame_state, etrue_neutered, if_true_neutered);
- }
-
- // Otherwise, proceed.
- Node* if_false_neutered =
- graph()->NewNode(common()->IfFalse(), branch_neutered);
- Node* efalse_neutered = effect;
-
- // Get the byte offset and byte length of the {receiver}.
- Node* byte_offset = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, efalse_neutered, if_false_neutered);
-
- Node* byte_length = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, efalse_neutered, if_false_neutered);
-
- // The end offset is the offset plus the element size
- // of the type that we want to load.
- int element_size = ExternalArrayElementSize(element_type);
- Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size));
-
- // We need to check that {end_offset} <= {byte_length}, ie
- // throw a RangeError if {byte_length} < {end_offset}.
- Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
- byte_length, end_offset);
- Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_range, if_false_neutered);
-
- Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
- Node* etrue_range = efalse_neutered;
- {
- if_true_range = etrue_range = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
- jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
- context, frame_state, etrue_range, if_true_range);
- }
-
- Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
- Node* efalse_range = efalse_neutered;
- Node* vfalse_range;
- {
- // Get the buffer's backing store.
- Node* backing_store = efalse_range =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBackingStore()),
- buffer, efalse_range, if_false_range);
-
- // Compute the buffer index at which we'll read.
- Node* buffer_index =
- graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
-
- // Perform the load.
- vfalse_range = efalse_range =
- graph()->NewNode(simplified()->LoadDataViewElement(element_type),
- buffer, backing_store, buffer_index,
- is_little_endian, efalse_range, if_false_range);
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ } else {
+ // If the buffer was neutered, deopt and let the unoptimized code throw.
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check_neutered =
+ graph()->NewNode(simplified()->BooleanNot(), check_neutered);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ p.feedback()),
+ check_neutered, effect, control);
}
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* extrue_neutered = graph()->NewNode(
- common()->IfException(), etrue_neutered,
- if_true_neutered); // We threw because the array was neutered.
- if_true_neutered =
- graph()->NewNode(common()->IfSuccess(), if_true_neutered);
+ // Get the buffer's backing store.
+ Node* backing_store = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, effect, control);
- Node* extrue_range =
- graph()->NewNode(common()->IfException(), etrue_range,
- if_true_range); // We threw because out of bounds.
- if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
-
- // We can't throw in LoadDataViewElement(),
- // so we don't need to handle that path here.
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
- extrue_range, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- extrue_neutered, extrue_range, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
- }
-
- // Connect the throwing paths to end.
- if_true_neutered =
- graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
- if_true_range =
- graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+ // Perform the load.
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadDataViewElement(element_type), buffer, backing_store,
+ offset, is_little_endian, effect, control);
// Continue on the regular path.
- ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
- return Changed(vfalse_range);
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
return NoChange();
}
+
Reduction JSCallReducer::ReduceDataViewPrototypeSet(
Node* node, ExternalArrayType element_type) {
+ uint32_t const element_size = ExternalArrayElementSize(element_type);
+ CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
-
- CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -6833,16 +6924,68 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
// Only do stuff if the {receiver} is really a DataView.
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
- // Check that the {offset} is a positive Smi.
- offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- offset, effect, control);
+ // Check that the {offset} is within range for the {receiver}.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // We only deal with DataViews here whose [[ByteLength]] is at least
+ // {element_size} and less than 2^31-{element_size}.
+ Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
+ if (dataview->byte_length()->Number() < element_size ||
+ dataview->byte_length()->Number() - element_size > kMaxInt) {
+ return NoChange();
+ }
- Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), offset);
+ // The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
+ if (dataview->byte_offset()->Number() > kMaxInt) {
+ return NoChange();
+ }
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
- is_positive, effect, control);
+ // Check that the {offset} is within range of the {byte_length}.
+ Node* byte_length = jsgraph()->Constant(
+ dataview->byte_length()->Number() - (element_size - 1));
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ // Add the [[ByteOffset]] to compute the effective offset.
+ Node* byte_offset =
+ jsgraph()->Constant(dataview->byte_offset()->Number());
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ } else {
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
+ byte_length = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
+
+ // Check that the {offset} is within range of the {byte_length}.
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ if (element_size > 0) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ Node* end_offset =
+ graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size - 1));
+ effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ end_offset, byte_length, effect, control);
+ }
+
+ // The {receiver}s [[ByteOffset]] also needs to be a (positive) Smi.
+ Node* byte_offset = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, effect, control);
+ byte_offset = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
+
+ // Compute the buffer index at which we'll read.
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ }
// Coerce {is_little_endian} to boolean.
is_little_endian =
@@ -6859,125 +7002,38 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- Node* branch_neutered = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_neutered, control);
-
- // Raise an error if it was neuteured.
- Node* if_true_neutered =
- graph()->NewNode(common()->IfTrue(), branch_neutered);
- Node* etrue_neutered = effect;
- {
- if_true_neutered = etrue_neutered = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kDetachedOperation),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
- context, frame_state, etrue_neutered, if_true_neutered);
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ } else {
+ // If the buffer was neutered, deopt and let the unoptimized code throw.
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check_neutered =
+ graph()->NewNode(simplified()->BooleanNot(), check_neutered);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ p.feedback()),
+ check_neutered, effect, control);
}
- // Otherwise, proceed.
- Node* if_false_neutered =
- graph()->NewNode(common()->IfFalse(), branch_neutered);
- Node* efalse_neutered = effect;
-
- // Get the byte offset and byte length of the {receiver}.
- Node* byte_offset = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, efalse_neutered, if_false_neutered);
-
- Node* byte_length = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, efalse_neutered, if_false_neutered);
-
- // The end offset is the offset plus the element size
- // of the type that we want to store.
- int element_size = ExternalArrayElementSize(element_type);
- Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size));
-
- // We need to check that {end_offset} <= {byte_length}, ie
- // throw a RangeError if {byte_length} < {end_offset}.
- Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
- byte_length, end_offset);
- Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_range, if_false_neutered);
-
- Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
- Node* etrue_range = efalse_neutered;
- {
- if_true_range = etrue_range = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
- jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
- context, frame_state, etrue_range, if_true_range);
- }
+ // Get the buffer's backing store.
+ Node* backing_store = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, effect, control);
- Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
- Node* efalse_range = efalse_neutered;
- Node* vfalse_range = jsgraph()->UndefinedConstant(); // Return value.
- {
- // Get the buffer's backing store.
- Node* backing_store = efalse_range =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBackingStore()),
- buffer, efalse_range, if_false_range);
-
- // Compute the buffer index at which we'll write.
- Node* buffer_index =
- graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
-
- // Perform the store.
- efalse_range =
- graph()->NewNode(simplified()->StoreDataViewElement(element_type),
- buffer, backing_store, buffer_index, value,
- is_little_endian, efalse_range, if_false_range);
- }
+ // Perform the store.
+ effect = graph()->NewNode(simplified()->StoreDataViewElement(element_type),
+ buffer, backing_store, offset, value,
+ is_little_endian, effect, control);
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* extrue_neutered = graph()->NewNode(
- common()->IfException(), etrue_neutered,
- if_true_neutered); // We threw because the array was neutered.
- if_true_neutered =
- graph()->NewNode(common()->IfSuccess(), if_true_neutered);
-
- Node* extrue_range =
- graph()->NewNode(common()->IfException(), etrue_range,
- if_true_range); // We threw because out of bounds.
- if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
-
- // We can't throw in StoreDataViewElement(),
- // so we don't need to handle that path here.
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
- extrue_range, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- extrue_neutered, extrue_range, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
- }
-
- // Connect the throwing paths to end.
- if_true_neutered =
- graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
- if_true_range =
- graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+ Node* value = jsgraph()->UndefinedConstant();
// Continue on the regular path.
- ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
- return Changed(vfalse_range);
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
return NoChange();
@@ -7182,6 +7238,45 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
return Changed(node);
}
+// ES section #sec-number-constructor
+Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+
+ if (p.arity() <= 2) {
+ ReplaceWithValue(node, jsgraph()->ZeroConstant());
+ }
+
+ // We don't have a new.target argument, so we can convert to number,
+ // but must also convert BigInts.
+ if (p.arity() == 3) {
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Handle<SharedFunctionInfo> number_constructor(
+ handle(native_context()->number_function()->shared(), isolate()));
+
+ const std::vector<Node*> checkpoint_parameters({
+ jsgraph()->UndefinedConstant(), /* receiver */
+ });
+ int checkpoint_parameters_size =
+ static_cast<int>(checkpoint_parameters.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), number_constructor,
+ Builtins::kGenericConstructorLazyDeoptContinuation, target, context,
+ checkpoint_parameters.data(), checkpoint_parameters_size,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumberConvertBigInt());
+ NodeProperties::ReplaceFrameStateInput(node, frame_state);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
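
For the DataView fast paths above, the reducer replaces the old explicit throw paths with CheckBounds nodes (which deopt to the unoptimized code on failure): the {offset} must leave room for all {element_size} bytes within the view's [[ByteLength]], and the effective backing-store index is {offset} plus the view's [[ByteOffset]]. A minimal standalone sketch of that arithmetic follows; it is plain C++, not V8 code, and the names are illustrative only.

    // Sketch only: the bounds arithmetic emitted for DataView get/set when the
    // byte length and byte offset are known. Not V8 code.
    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Returns the backing-store index for an `element_size`-byte access at
    // `offset`, or nullopt where the optimized code would deopt instead.
    std::optional<uint32_t> EffectiveIndex(uint32_t byte_length,
                                           uint32_t byte_offset,
                                           uint32_t offset,
                                           uint32_t element_size) {
      // Mirrors CheckBounds(offset, byte_length - (element_size - 1)): the last
      // byte touched, offset + element_size - 1, must still be below byte_length.
      if (byte_length < element_size) return std::nullopt;
      if (offset >= byte_length - (element_size - 1)) return std::nullopt;
      // Mirrors NumberAdd(offset, byte_offset): index into the buffer's store.
      return byte_offset + offset;
    }

    int main() {
      // A Float64 read at offset 4 of a 12-byte view that starts at byte 8 of
      // its buffer: in bounds, and it reads buffer bytes 12..19.
      if (auto index = EffectiveIndex(12, 8, 4, 8)) {
        std::printf("store index: %u\n", *index);
      } else {
        std::printf("would deopt: out of bounds\n");
      }
    }
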
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6e3f531647..e04870ed2f 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -94,6 +94,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
+ Reduction ReduceArrayPrototypeSlice(Node* node);
Reduction ReduceArrayIsArray(Node* node);
enum class ArrayIteratorKind { kArray, kTypedArray };
Reduction ReduceArrayIterator(Node* node, IterationKind kind);
@@ -190,6 +191,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceDateNow(Node* node);
Reduction ReduceNumberParseInt(Node* node);
+ Reduction ReduceNumberConstructor(Node* node);
+
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
@@ -231,7 +234,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
@@ -243,7 +246,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 85a80a2b2f..ef2297c9d6 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -17,7 +17,6 @@ namespace internal {
namespace compiler {
Reduction JSContextSpecialization::Reduce(Node* node) {
- DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kParameter:
return ReduceParameter(node);
@@ -101,7 +100,7 @@ bool IsContextParameter(Node* node) {
// specialization context. If successful, update {distance} to whatever
// distance remains from the specialization context.
base::Optional<ContextRef> GetSpecializationContext(
- const JSHeapBroker* broker, Node* node, size_t* distance,
+ JSHeapBroker* broker, Node* node, size_t* distance,
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index d2f56d50f1..7324c5aaf0 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#include "src/compiler/graph-reducer.h"
+#include "src/maybe-handles.h"
namespace v8 {
namespace internal {
@@ -62,12 +63,12 @@ class JSContextSpecialization final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index a9ce42e1e2..6484e05061 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -21,6 +21,7 @@
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
@@ -43,8 +44,9 @@ Node* GetArgumentsFrameState(Node* frame_state) {
// inlined.
bool IsAllocationInlineable(const JSFunctionRef& target,
const JSFunctionRef& new_target) {
+ CHECK_IMPLIES(new_target.has_initial_map(),
+ !new_target.initial_map().is_dictionary_map());
return new_target.has_initial_map() &&
- !new_target.initial_map().is_dictionary_map() &&
new_target.initial_map().constructor_or_backpointer().equals(target);
}
@@ -59,6 +61,7 @@ const int kBlockContextAllocationLimit = 16;
} // namespace
Reduction JSCreateLowering::Reduce(Node* node) {
+ DisallowHeapAccess disallow_heap_access;
switch (node->opcode()) {
case IrOpcode::kJSCreate:
return ReduceJSCreate(node);
@@ -110,7 +113,6 @@ Reduction JSCreateLowering::Reduce(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
Node* const target = NodeProperties::GetValueInput(node, 0);
Type const target_type = NodeProperties::GetType(target);
@@ -137,25 +139,22 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
return NoChange();
}
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(original_constructor);
-
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- SlackTrackingResult slack_tracking_result =
- original_constructor.FinishSlackTracking();
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(
+ original_constructor);
+ MapRef initial_map = original_constructor.initial_map();
// Emit code to allocate the JSObject instance for the
// {original_constructor}.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(slack_tracking_result.instance_size);
+ a.Allocate(slack_tracking_prediction.instance_size());
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
jsgraph()->EmptyFixedArrayConstant());
- for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
@@ -166,7 +165,6 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
@@ -260,7 +258,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_frame, rest_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ native_context_ref().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -379,7 +377,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ native_context_ref().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -404,7 +402,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateGeneratorObject, node->opcode());
Node* const closure = NodeProperties::GetValueInput(node, 0);
Node* const receiver = NodeProperties::GetValueInput(node, 1);
@@ -416,16 +413,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
JSFunctionRef js_function =
closure_type.AsHeapConstant()->Ref().AsJSFunction();
- js_function.EnsureHasInitialMap();
+ if (!js_function.has_initial_map()) return NoChange();
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- SlackTrackingResult slack_tracking_result =
- js_function.FinishSlackTracking();
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(js_function);
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(js_function);
+ MapRef initial_map = js_function.initial_map();
DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -433,8 +426,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
int parameter_count_no_receiver = shared.internal_formal_parameter_count();
- int size =
- parameter_count_no_receiver + shared.GetBytecodeArrayRegisterCount();
+ int size = parameter_count_no_receiver +
+ shared.GetBytecodeArray().register_count();
AllocationBuilder ab(jsgraph(), effect, control);
ab.AllocateArray(size, factory()->fixed_array_map());
for (int i = 0; i < size; ++i) {
@@ -445,7 +438,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(slack_tracking_result.instance_size);
+ a.Allocate(slack_tracking_prediction.instance_size());
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
Node* undefined = jsgraph()->UndefinedConstant();
a.Store(AccessBuilder::ForMap(), initial_map);
@@ -469,7 +462,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
}
// Handle in-object properties, too.
- for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
undefined);
}
@@ -481,20 +475,18 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Constructs an array with a variable {length} when no upper bound
// is known for the capacity.
-Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- if (!IsHoleyElementsKind(initial_map->elements_kind())) {
- initial_map =
- Map::AsElementsKind(isolate(), initial_map,
- GetHoleyElementsKind(initial_map->elements_kind()));
- }
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(
+ GetHoleyElementsKind(initial_map.elements_kind())));
// Check that the {limit} is an unsigned integer in the valid range.
// This has to be kept in sync with src/runtime/runtime-array.cc,
@@ -506,7 +498,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Construct elements and properties for the resulting JSArray.
Node* elements = effect =
- graph()->NewNode(IsDoubleElementsKind(initial_map->elements_kind())
+ graph()->NewNode(IsDoubleElementsKind(initial_map.elements_kind())
? simplified()->NewDoubleElements(pretenure)
: simplified()->NewSmiOrObjectElements(pretenure),
length, effect, control);
@@ -514,15 +506,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(initial_map->elements_kind()),
- length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ a.Store(AccessBuilder::ForJSArrayLength(initial_map.elements_kind()), length);
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -532,20 +523,21 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Constructs an array with a variable {length} when an actual
// upper bound is known for the {capacity}.
-Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
- int capacity,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, Node* length, int capacity, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK(node->opcode() == IrOpcode::kJSCreateArray ||
node->opcode() == IrOpcode::kJSCreateEmptyLiteralArray);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
- initial_map = Map::AsElementsKind(isolate(), initial_map, elements_kind);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
}
DCHECK(IsFastElementsKind(elements_kind));
@@ -561,14 +553,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -576,16 +568,16 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
return Changed(node);
}
-Reduction JSCreateLowering::ReduceNewArray(Node* node,
- std::vector<Node*> values,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, std::vector<Node*> values, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
DCHECK(IsFastElementsKind(elements_kind));
// Check {values} based on the {elements_kind}. These checks are guarded
@@ -618,14 +610,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -634,19 +626,19 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
}
Reduction JSCreateLowering::ReduceNewArrayToStubCall(
- Node* node, Handle<AllocationSite> site) {
+ Node* node, base::Optional<AllocationSiteRef> site) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
Type new_target_type = NodeProperties::GetType(new_target);
- Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(site);
+ Node* type_info =
+ site ? jsgraph()->Constant(*site) : jsgraph()->UndefinedConstant();
ElementsKind elements_kind =
- site.is_null() ? GetInitialFastElementsKind() : site->GetElementsKind();
+ site ? site->GetElementsKind() : GetInitialFastElementsKind();
AllocationSiteOverrideMode override_mode =
- (site.is_null() || AllocationSite::ShouldTrack(elements_kind))
+ (!site || AllocationSite::ShouldTrack(elements_kind))
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
@@ -699,54 +691,48 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Handle<AllocationSite> const site = p.site();
+ base::Optional<AllocationSiteRef> site_ref;
+ {
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) {
+ site_ref = AllocationSiteRef(js_heap_broker(), site);
+ }
+ }
PretenureFlag pretenure = NOT_TENURED;
- Handle<JSFunction> constructor(native_context()->array_function(), isolate());
+ JSFunctionRef constructor = native_context_ref().array_function();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
- Type new_target_type =
- (target == new_target)
- ? Type::HeapConstant(js_heap_broker(), constructor, zone())
- : NodeProperties::GetType(new_target);
+ Type new_target_type = (target == new_target)
+ ? Type::HeapConstant(constructor, zone())
+ : NodeProperties::GetType(new_target);
// Extract original constructor function.
if (new_target_type.IsHeapConstant() &&
- new_target_type.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(new_target_type.AsHeapConstant()->Value());
- DCHECK(constructor->IsConstructor());
- DCHECK(original_constructor->IsConstructor());
+ new_target_type.AsHeapConstant()->Ref().IsJSFunction()) {
+ JSFunctionRef original_constructor =
+ new_target_type.AsHeapConstant()->Ref().AsJSFunction();
+ DCHECK(constructor.IsConstructor());
+ DCHECK(original_constructor.IsConstructor());
// Check if we can inline the allocation.
- if (IsAllocationInlineable(
- JSFunctionRef(js_heap_broker(), constructor),
- JSFunctionRef(js_heap_broker(), original_constructor))) {
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- original_constructor->CompleteInobjectSlackTrackingIfActive();
-
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(
- JSFunctionRef(js_heap_broker(), original_constructor));
+ if (IsAllocationInlineable(constructor, original_constructor)) {
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(
+ original_constructor);
+ MapRef initial_map = original_constructor.initial_map();
// Tells whether we are protected by either the {site} or a
// protector cell to do certain speculative optimizations.
bool can_inline_call = false;
// Check if we have a feedback {site} on the {node}.
- if (!site.is_null()) {
- ElementsKind elements_kind = site->GetElementsKind();
- if (initial_map.elements_kind() != elements_kind) {
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
- }
- can_inline_call = site->CanInlineCall();
- auto site_ref = AllocationSiteRef(js_heap_broker(), site);
- pretenure = dependencies()->DependOnPretenureMode(site_ref);
- dependencies()->DependOnElementsKind(site_ref);
+ if (site_ref) {
+ ElementsKind elements_kind = site_ref->GetElementsKind();
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ can_inline_call = site_ref->CanInlineCall();
+ pretenure = dependencies()->DependOnPretenureMode(*site_ref);
+ dependencies()->DependOnElementsKind(*site_ref);
} else {
can_inline_call = isolate()->IsArrayConstructorIntact();
}
@@ -754,8 +740,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
if (arity == 0) {
Node* length = jsgraph()->ZeroConstant();
int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, initial_map.object<Map>(),
- pretenure);
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ slack_tracking_prediction);
} else if (arity == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type length_type = NodeProperties::GetType(length);
@@ -767,23 +753,21 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
elements_kind, IsHoleyElementsKind(elements_kind)
? HOLEY_ELEMENTS
: PACKED_ELEMENTS);
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
- return ReduceNewArray(node, std::vector<Node*>{length},
- initial_map.object<Map>(), pretenure);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ return ReduceNewArray(node, std::vector<Node*>{length}, initial_map,
+ pretenure, slack_tracking_prediction);
}
if (length_type.Is(Type::SignedSmall()) && length_type.Min() >= 0 &&
length_type.Max() <= kElementLoopUnrollLimit &&
length_type.Min() == length_type.Max()) {
int capacity = static_cast<int>(length_type.Max());
- return ReduceNewArray(node, length, capacity,
- initial_map.object<Map>(), pretenure);
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ slack_tracking_prediction);
}
if (length_type.Maybe(Type::UnsignedSmall()) && can_inline_call) {
- return ReduceNewArray(node, length, initial_map.object<Map>(),
- pretenure);
+ return ReduceNewArray(node, length, initial_map, pretenure,
+ slack_tracking_prediction);
}
} else if (arity <= JSArray::kInitialMaxFastElementArray) {
// Gather the values to store into the newly created array.
@@ -828,13 +812,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// we cannot inline this invocation of the Array constructor here.
return NoChange();
}
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
-
- return ReduceNewArray(node, values, initial_map.object<Map>(),
- pretenure);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ return ReduceNewArray(node, values, initial_map, pretenure,
+ slack_tracking_prediction);
}
}
}
@@ -842,11 +823,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// TODO(bmeurer): Optimize the subclassing case.
if (target != new_target) return NoChange();
- return ReduceNewArrayToStubCall(node, site);
+ return ReduceNewArrayToStubCall(node, site_ref);
}
Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, node->opcode());
CreateArrayIteratorParameters const& p =
CreateArrayIteratorParametersOf(node->op());
@@ -906,7 +886,6 @@ MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
} // namespace
Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCollectionIterator, node->opcode());
CreateCollectionIteratorParameters const& p =
CreateCollectionIteratorParametersOf(node->op());
@@ -938,7 +917,6 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, node->opcode());
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
@@ -979,7 +957,6 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
SharedFunctionInfoRef shared(js_heap_broker(), p.shared_info());
@@ -1042,7 +1019,6 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* done = NodeProperties::GetValueInput(node, 1);
@@ -1067,7 +1043,6 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateStringIterator, node->opcode());
Node* string = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1089,14 +1064,13 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
Node* key = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* array_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ Node* array_map =
+ jsgraph()->Constant(native_context_ref().js_array_packed_elements_map());
Node* properties = jsgraph()->EmptyFixedArrayConstant();
Node* length = jsgraph()->Constant(2);
@@ -1120,11 +1094,10 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- MapRef promise_map = native_context_ref().promise_function_initial_map();
+ MapRef promise_map = native_context_ref().promise_function().initial_map();
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(promise_map.instance_size());
@@ -1165,7 +1138,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
pretenure = dependencies()->DependOnPretenureMode(site);
}
dependencies()->DependOnElementsKinds(site);
- JSObjectRef boilerplate = site.boilerplate();
+ JSObjectRef boilerplate = site.boilerplate().value();
Node* value = effect =
AllocateFastLiteral(effect, control, boilerplate, pretenure);
ReplaceWithValue(node, value, effect, control);
@@ -1178,20 +1151,21 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot())->ToObject(), isolate());
- if (feedback->IsAllocationSite()) {
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
- DCHECK(!site->PointsToLiteral());
- Handle<Map> const initial_map(
- native_context()->GetInitialJSArrayMap(site->GetElementsKind()),
- isolate());
- auto site_ref = AllocationSiteRef(js_heap_broker(), site);
- PretenureFlag const pretenure =
- dependencies()->DependOnPretenureMode(site_ref);
- dependencies()->DependOnElementsKind(site_ref);
+ FeedbackVectorRef fv(js_heap_broker(), p.feedback().vector());
+ ObjectRef feedback = fv.get(p.feedback().slot());
+ if (feedback.IsAllocationSite()) {
+ AllocationSiteRef site = feedback.AsAllocationSite();
+ DCHECK(!site.PointsToLiteral());
+ MapRef initial_map =
+ native_context_ref().GetInitialJSArrayMap(site.GetElementsKind());
+ PretenureFlag const pretenure = dependencies()->DependOnPretenureMode(site);
+ dependencies()->DependOnElementsKind(site);
Node* length = jsgraph()->ZeroConstant();
- return ReduceNewArray(node, length, 0, initial_map, pretenure);
+ DCHECK(!initial_map.IsInobjectSlackTrackingInProgress());
+ SlackTrackingPrediction slack_tracking_prediction(
+ initial_map, initial_map.instance_size());
+ return ReduceNewArray(node, length, 0, initial_map, pretenure,
+ slack_tracking_prediction);
}
return NoChange();
}
@@ -1202,10 +1176,10 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Retrieve the initial map for the object.
- Handle<Map> map = factory()->ObjectLiteralMapFromCache(native_context(), 0);
- DCHECK(!map->is_dictionary_map());
- DCHECK(!map->IsInobjectSlackTrackingInProgress());
- Node* js_object_map = jsgraph()->HeapConstant(map);
+ MapRef map = native_context_ref().object_function().initial_map();
+ DCHECK(!map.is_dictionary_map());
+ DCHECK(!map.IsInobjectSlackTrackingInProgress());
+ Node* js_object_map = jsgraph()->Constant(map);
// Setup elements and properties.
Node* elements = jsgraph()->EmptyFixedArrayConstant();
@@ -1213,13 +1187,12 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(map->instance_size());
+ a.Allocate(map.instance_size());
a.Store(AccessBuilder::ForMap(), js_object_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- for (int i = 0; i < map->GetInObjectProperties(); i++) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), map), i),
+ for (int i = 0; i < map.GetInObjectProperties(); i++) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(map, i),
jsgraph()->UndefinedConstant());
}
@@ -1229,7 +1202,6 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateLiteralRegExp, node->opcode());
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1247,7 +1219,6 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
@@ -1295,7 +1266,6 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* extension = NodeProperties::GetValueInput(node, 0);
@@ -1317,7 +1287,6 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* exception = NodeProperties::GetValueInput(node, 0);
@@ -1343,7 +1312,6 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
int const context_length = scope_info.ContextLength();
@@ -1377,7 +1345,6 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateObject, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
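
Aside (illustrative only, not part of the patch): the reworked JSCreateLowering paths above all follow the same shape: ask the compilation dependencies for an instance-size prediction up front, allocate exactly that many bytes, then pre-initialize every predicted in-object property. A toy C++ sketch of that flow, with simple stand-ins for AllocationBuilder and the real SlackTrackingPrediction type:

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for v8::internal::SlackTrackingPrediction.
struct SlackTrackingPrediction {
  int instance_size;
  int inobject_property_count;
};

// Stand-in for the AllocationBuilder store sequence: records what would be
// emitted instead of building graph nodes.
std::vector<std::string> EmitInlineAllocation(const SlackTrackingPrediction& p) {
  std::vector<std::string> stores;
  stores.push_back("Allocate(" + std::to_string(p.instance_size) + ")");
  stores.push_back("Store(map)");
  stores.push_back("Store(properties = empty_fixed_array)");
  stores.push_back("Store(elements = empty_fixed_array)");
  for (int i = 0; i < p.inobject_property_count; ++i) {
    stores.push_back("Store(in-object property " + std::to_string(i) +
                     " = undefined)");
  }
  return stores;
}

int main() {
  // Depending on the prediction is what allows the generated code to be
  // deoptimized if slack tracking later shrinks the instance.
  SlackTrackingPrediction prediction{64, 4};
  for (const std::string& s : EmitInlineAllocation(prediction)) {
    std::printf("%s\n", s.c_str());
  }
}
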
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 667298c238..151be1b35c 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -26,14 +26,14 @@ class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-
+class SlackTrackingPrediction;
// Lowers JSCreate-level operators to fast (inline) allocations.
class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
@@ -68,12 +68,17 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
- Reduction ReduceNewArray(Node* node, Node* length, Handle<Map> initial_map,
- PretenureFlag pretenure);
- Reduction ReduceNewArray(Node* node, Node* length, int capacity,
- Handle<Map> initial_map, PretenureFlag pretenure);
- Reduction ReduceNewArray(Node* node, std::vector<Node*> values,
- Handle<Map> initial_map, PretenureFlag pretenure);
+ Reduction ReduceNewArray(
+ Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
+ Reduction ReduceNewArray(
+ Node* node, Node* length, int capacity, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
+ Reduction ReduceNewArray(
+ Node* node, std::vector<Node*> values, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceJSCreateObject(Node* node);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
@@ -104,7 +109,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateLiteralRegExp(Node* effect, Node* control,
JSRegExpRef boilerplate);
- Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
+ Reduction ReduceNewArrayToStubCall(Node* node,
+ base::Optional<AllocationSiteRef> site);
Factory* factory() const;
Graph* graph() const;
@@ -115,12 +121,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
CompilationDependencies* dependencies() const { return dependencies_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Zone* zone() const { return zone_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 5e134307f4..0903f181b9 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -82,6 +82,7 @@ REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToNumberConvertBigInt)
REPLACE_STUB_CALL(ToNumeric)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
@@ -358,14 +359,15 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Handle<AllocationSite> const site = p.site();
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), ArrayConstructorDescriptor{}, arity + 1,
CallDescriptor::kNeedsFrameState, node->op()->properties());
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(arity);
- Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(site);
+ MaybeHandle<AllocationSite> const maybe_site = p.site();
+ Handle<AllocationSite> site;
+ Node* type_info = maybe_site.ToHandle(&site) ? jsgraph()->HeapConstant(site)
+ : jsgraph()->UndefinedConstant();
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
@@ -532,6 +534,17 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
}
}
+void JSGenericLowering::LowerJSCloneObject(Node* node) {
+ CloneObjectParameters const& p = CloneObjectParametersOf(node->op());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector()));
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSCreateEmptyLiteralObject(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
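
Aside (illustrative only, not part of the patch): LowerJSCreateArray now receives the allocation site as a MaybeHandle and resolves it to either a heap constant or the undefined constant. A small stand-alone sketch of that branch, with std::optional standing in for MaybeHandle<AllocationSite>:

#include <cstdio>
#include <optional>
#include <string>

// std::optional<std::string> stands in for MaybeHandle<AllocationSite>; the
// returned string stands in for the graph node used as the type_info input.
std::string TypeInfoInput(const std::optional<std::string>& maybe_site) {
  return maybe_site ? "HeapConstant(" + *maybe_site + ")"
                    : "UndefinedConstant()";
}

int main() {
  std::printf("%s\n", TypeInfoInput(std::string("site")).c_str());
  std::printf("%s\n", TypeInfoInput(std::nullopt).c_str());
}
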
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 2624387165..949dca377d 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -4,41 +4,528 @@
#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/graph-reducer.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
namespace compiler {
-MapRef HeapObjectRef::map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<HeapObject>()->map(), broker()->isolate()));
+#define FORWARD_DECL(Name) class Name##Data;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+// TODO(neis): It would be nice to share the serialized data for read-only
+// objects.
+
+class ObjectData : public ZoneObject {
+ public:
+ static ObjectData* Serialize(JSHeapBroker* broker, Handle<Object> object);
+
+ ObjectData(JSHeapBroker* broker_, Handle<Object> object_, bool is_smi_)
+ : broker(broker_), object(object_), is_smi(is_smi_) {
+ broker->AddData(object, this);
+ }
+
+#define DECLARE_IS_AND_AS(Name) \
+ bool Is##Name() const; \
+ Name##Data* As##Name();
+ HEAP_BROKER_OBJECT_LIST(DECLARE_IS_AND_AS)
+#undef DECLARE_IS_AND_AS
+
+ JSHeapBroker* const broker;
+ Handle<Object> const object;
+ bool const is_smi;
+};
+
+// TODO(neis): Perhaps add a boolean that indicates whether serialization of an
+// object has completed. That could be used to add safety checks.
+
+#define GET_OR_CREATE(name) \
+ broker->GetOrCreateData(handle(object_->name(), broker->isolate()))
+
+class HeapObjectData : public ObjectData {
+ public:
+ static HeapObjectData* Serialize(JSHeapBroker* broker,
+ Handle<HeapObject> object);
+
+ HeapObjectType const type;
+ MapData* const map;
+
+ HeapObjectData(JSHeapBroker* broker_, Handle<HeapObject> object_,
+ HeapObjectType type_)
+ : ObjectData(broker_, object_, false),
+ type(type_),
+ map(GET_OR_CREATE(map)->AsMap()) {
+ CHECK(broker_->SerializingAllowed());
+ }
+};
+
+class PropertyCellData : public HeapObjectData {
+ public:
+ PropertyCellData(JSHeapBroker* broker_, Handle<PropertyCell> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSObjectData : public HeapObjectData {
+ public:
+ JSObjectData(JSHeapBroker* broker_, Handle<JSObject> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSFunctionData : public JSObjectData {
+ public:
+ JSGlobalProxyData* const global_proxy;
+ MapData* const initial_map; // Can be nullptr.
+ bool const has_prototype;
+ ObjectData* const prototype; // Can be nullptr.
+ bool const PrototypeRequiresRuntimeLookup;
+ SharedFunctionInfoData* const shared;
+
+ JSFunctionData(JSHeapBroker* broker_, Handle<JSFunction> object_,
+ HeapObjectType type_);
+};
+
+class JSRegExpData : public JSObjectData {
+ public:
+ JSRegExpData(JSHeapBroker* broker_, Handle<JSRegExp> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class HeapNumberData : public HeapObjectData {
+ public:
+ HeapNumberData(JSHeapBroker* broker_, Handle<HeapNumber> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class MutableHeapNumberData : public HeapObjectData {
+ public:
+ MutableHeapNumberData(JSHeapBroker* broker_,
+ Handle<MutableHeapNumber> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class ContextData : public HeapObjectData {
+ public:
+ ContextData(JSHeapBroker* broker_, Handle<Context> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class NativeContextData : public ContextData {
+ public:
+#define DECL_MEMBER(type, name) type##Data* const name;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+
+ NativeContextData(JSHeapBroker* broker_, Handle<NativeContext> object_,
+ HeapObjectType type_)
+ : ContextData(broker_, object_, type_)
+#define INIT_MEMBER(type, name) , name(GET_OR_CREATE(name)->As##type())
+ BROKER_NATIVE_CONTEXT_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+ {
+ }
+};
+
+class NameData : public HeapObjectData {
+ public:
+ NameData(JSHeapBroker* broker, Handle<Name> object, HeapObjectType type)
+ : HeapObjectData(broker, object, type) {}
+};
+
+class StringData : public NameData {
+ public:
+ StringData(JSHeapBroker* broker, Handle<String> object, HeapObjectType type)
+ : NameData(broker, object, type),
+ length(object->length()),
+ first_char(length > 0 ? object->Get(0) : 0) {
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ if (length <= kMaxLengthForDoubleConversion) {
+ to_number = StringToDouble(
+ broker->isolate(), broker->isolate()->unicode_cache(), object, flags);
+ }
+ }
+
+ int const length;
+ uint16_t const first_char;
+ base::Optional<double> to_number;
+
+ private:
+ static constexpr int kMaxLengthForDoubleConversion = 23;
+};
+
+class InternalizedStringData : public StringData {
+ public:
+ InternalizedStringData(JSHeapBroker* broker,
+ Handle<InternalizedString> object, HeapObjectType type)
+ : StringData(broker, object, type) {}
+};
+
+namespace {
+
+bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ // Make sure the boilerplate map is not deprecated.
+ if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+ // Check for too deep nesting.
+ if (max_depth == 0) return false;
+
+ // Check the elements.
+ Isolate* const isolate = boilerplate->GetIsolate();
+ Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+ if (elements->length() > 0 &&
+ elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1,
+ max_properties)) {
+ return false;
+ }
+ }
+ }
+ } else if (boilerplate->HasDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
+ return false;
+ }
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0)) {
+ return false;
+ }
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors(), isolate);
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ if ((*max_properties)-- == 0) return false;
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ return true;
}
-double HeapNumberRef::value() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapNumber>()->value();
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying. The limit is chosen to
+// match the maximum number of inobject properties, to ensure that the
+// performance of using object literals is not worse than using constructor
+// functions, see crbug.com/v8/6211 for details.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
+ int max_properties = kMaxFastLiteralProperties;
+ return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
+ &max_properties);
}
-double MutableHeapNumberRef::value() const {
- AllowHandleDereference allow_handle_dereference;
- return object<MutableHeapNumber>()->value();
+} // namespace
+
+class AllocationSiteData : public HeapObjectData {
+ public:
+ AllocationSiteData(JSHeapBroker* broker, Handle<AllocationSite> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker, object_, type_),
+ PointsToLiteral(object_->PointsToLiteral()),
+ GetPretenureMode(object_->GetPretenureMode()),
+ nested_site(GET_OR_CREATE(nested_site)) {
+ if (PointsToLiteral) {
+ if (IsInlinableFastLiteral(
+ handle(object_->boilerplate(), broker->isolate()))) {
+ boilerplate = GET_OR_CREATE(boilerplate)->AsJSObject();
+ }
+ } else {
+ GetElementsKind = object_->GetElementsKind();
+ CanInlineCall = object_->CanInlineCall();
+ }
+ }
+
+ bool const PointsToLiteral;
+ PretenureFlag const GetPretenureMode;
+ ObjectData* const nested_site;
+ JSObjectData* boilerplate = nullptr;
+
+ // These are only valid if PointsToLiteral is false.
+ ElementsKind GetElementsKind = NO_ELEMENTS;
+ bool CanInlineCall = false;
+};
+
+// Only used in JSNativeContextSpecialization.
+class ScriptContextTableData : public HeapObjectData {
+ public:
+ ScriptContextTableData(JSHeapBroker* broker_,
+ Handle<ScriptContextTable> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class MapData : public HeapObjectData {
+ public:
+ InstanceType const instance_type;
+ int const instance_size;
+ byte const bit_field;
+ byte const bit_field2;
+ uint32_t const bit_field3;
+
+ MapData(JSHeapBroker* broker_, Handle<Map> object_, HeapObjectType type_);
+
+ // Extra information.
+ void SerializeElementsKindGeneralizations();
+ const ZoneVector<MapData*>& elements_kind_generalizations() {
+ return elements_kind_generalizations_;
+ }
+
+ private:
+ ZoneVector<MapData*> elements_kind_generalizations_;
+};
+
+MapData::MapData(JSHeapBroker* broker_, Handle<Map> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_),
+ instance_type(object_->instance_type()),
+ instance_size(object_->instance_size()),
+ bit_field(object_->bit_field()),
+ bit_field2(object_->bit_field2()),
+ bit_field3(object_->bit_field3()),
+ elements_kind_generalizations_(broker->zone()) {}
+
+JSFunctionData::JSFunctionData(JSHeapBroker* broker_,
+ Handle<JSFunction> object_, HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_),
+ global_proxy(GET_OR_CREATE(global_proxy)->AsJSGlobalProxy()),
+ initial_map(object_->has_prototype_slot() && object_->has_initial_map()
+ ? GET_OR_CREATE(initial_map)->AsMap()
+ : nullptr),
+ has_prototype(object_->has_prototype_slot() && object_->has_prototype()),
+ prototype(has_prototype ? GET_OR_CREATE(prototype) : nullptr),
+ PrototypeRequiresRuntimeLookup(object_->PrototypeRequiresRuntimeLookup()),
+ shared(GET_OR_CREATE(shared)->AsSharedFunctionInfo()) {
+ if (initial_map != nullptr && initial_map->instance_type == JS_ARRAY_TYPE) {
+ initial_map->SerializeElementsKindGeneralizations();
+ }
}
-bool ObjectRef::IsSmi() const {
- AllowHandleDereference allow_handle_dereference;
- return object_->IsSmi();
+void MapData::SerializeElementsKindGeneralizations() {
+ broker->Trace("Computing ElementsKind generalizations of %p.\n", *object);
+ DCHECK_EQ(instance_type, JS_ARRAY_TYPE);
+ MapRef self(this);
+ ElementsKind from_kind = self.elements_kind();
+ for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
+ ElementsKind to_kind = static_cast<ElementsKind>(i);
+ if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ Handle<Map> target =
+ Map::AsElementsKind(broker->isolate(), self.object<Map>(), to_kind);
+ elements_kind_generalizations_.push_back(
+ broker->GetOrCreateData(target)->AsMap());
+ }
+ }
}
-int ObjectRef::AsSmi() const { return object<Smi>()->value(); }
+class FeedbackVectorData : public HeapObjectData {
+ public:
+ const ZoneVector<ObjectData*>& feedback() { return feedback_; }
+
+ FeedbackVectorData(JSHeapBroker* broker_, Handle<FeedbackVector> object_,
+ HeapObjectType type_);
+
+ private:
+ ZoneVector<ObjectData*> feedback_;
+};
+
+FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker_,
+ Handle<FeedbackVector> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_), feedback_(broker_->zone()) {
+ feedback_.reserve(object_->length());
+ for (int i = 0; i < object_->length(); ++i) {
+ MaybeObject* value = object_->get(i);
+ feedback_.push_back(value->IsObject()
+ ? broker->GetOrCreateData(
+ handle(value->ToObject(), broker->isolate()))
+ : nullptr);
+ }
+ DCHECK_EQ(object_->length(), feedback_.size());
+}
+
+class FixedArrayBaseData : public HeapObjectData {
+ public:
+ int const length;
+
+ FixedArrayBaseData(JSHeapBroker* broker_, Handle<FixedArrayBase> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_), length(object_->length()) {}
+};
+
+class FixedArrayData : public FixedArrayBaseData {
+ public:
+ FixedArrayData(JSHeapBroker* broker_, Handle<FixedArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_) {}
+};
+
+class FixedDoubleArrayData : public FixedArrayBaseData {
+ public:
+ FixedDoubleArrayData(JSHeapBroker* broker_, Handle<FixedDoubleArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_) {}
+};
+
+class BytecodeArrayData : public FixedArrayBaseData {
+ public:
+ int const register_count;
+
+ BytecodeArrayData(JSHeapBroker* broker_, Handle<BytecodeArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_),
+ register_count(object_->register_count()) {}
+};
+
+class JSArrayData : public JSObjectData {
+ public:
+ JSArrayData(JSHeapBroker* broker_, Handle<JSArray> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class ScopeInfoData : public HeapObjectData {
+ public:
+ ScopeInfoData(JSHeapBroker* broker_, Handle<ScopeInfo> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class SharedFunctionInfoData : public HeapObjectData {
+ public:
+ int const builtin_id;
+ BytecodeArrayData* const GetBytecodeArray; // Can be nullptr.
+#define DECL_MEMBER(type, name) type const name;
+ BROKER_SFI_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+
+ SharedFunctionInfoData(JSHeapBroker* broker_,
+ Handle<SharedFunctionInfo> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_),
+ builtin_id(object_->HasBuiltinId() ? object_->builtin_id()
+ : Builtins::kNoBuiltinId),
+ GetBytecodeArray(
+ object_->HasBytecodeArray()
+ ? GET_OR_CREATE(GetBytecodeArray)->AsBytecodeArray()
+ : nullptr)
+#define INIT_MEMBER(type, name) , name(object_->name())
+ BROKER_SFI_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+ {
+ DCHECK_EQ(HasBuiltinId, builtin_id != Builtins::kNoBuiltinId);
+ DCHECK_EQ(HasBytecodeArray, GetBytecodeArray != nullptr);
+ }
+};
+
+class ModuleData : public HeapObjectData {
+ public:
+ ModuleData(JSHeapBroker* broker_, Handle<Module> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class CellData : public HeapObjectData {
+ public:
+ CellData(JSHeapBroker* broker_, Handle<Cell> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSGlobalProxyData : public JSObjectData {
+ public:
+ JSGlobalProxyData(JSHeapBroker* broker_, Handle<JSGlobalProxy> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class CodeData : public HeapObjectData {
+ public:
+ CodeData(JSHeapBroker* broker_, Handle<Code> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectData::Is##Name() const { \
+ if (broker->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleDereference allow_handle_dereference; \
+ return object->Is##Name(); \
+ } \
+ if (is_smi) return false; \
+ InstanceType instance_type = \
+ static_cast<const HeapObjectData*>(this)->type.instance_type(); \
+ return InstanceTypeChecker::Is##Name(instance_type); \
+ } \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK_NE(broker->mode(), JSHeapBroker::kDisabled); \
+ CHECK(Is##Name()); \
+ return static_cast<Name##Data*>(this); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
+#undef DEFINE_IS_AND_AS
+
+ObjectData* ObjectData::Serialize(JSHeapBroker* broker, Handle<Object> object) {
+ CHECK(broker->SerializingAllowed());
+ return object->IsSmi() ? new (broker->zone()) ObjectData(broker, object, true)
+ : HeapObjectData::Serialize(
+ broker, Handle<HeapObject>::cast(object));
+}
+
+HeapObjectData* HeapObjectData::Serialize(JSHeapBroker* broker,
+ Handle<HeapObject> object) {
+ CHECK(broker->SerializingAllowed());
+ Handle<Map> map(object->map(), broker->isolate());
+ HeapObjectType type = broker->HeapObjectTypeFromMap(map);
+
+#define RETURN_CREATE_DATA_IF_MATCH(name) \
+ if (object->Is##name()) { \
+ return new (broker->zone()) \
+ name##Data(broker, Handle<name>::cast(object), type); \
+ }
+ HEAP_BROKER_OBJECT_LIST(RETURN_CREATE_DATA_IF_MATCH)
+#undef RETURN_CREATE_DATA_IF_MATCH
+ UNREACHABLE();
+}
bool ObjectRef::equals(const ObjectRef& other) const {
- return object<Object>().equals(other.object<Object>());
+ return data_ == other.data_;
}
StringRef ObjectRef::TypeOf() const {
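
Aside (illustrative only, not part of the patch): the IsFastLiteralHelper routine added in the hunk above walks a boilerplate object graph under two budgets, a maximum nesting depth and a total property/element count shared across the whole graph. A toy version of that traversal over a plain tree, to show how the shared budget is threaded through the recursion:

#include <cstdio>
#include <vector>

struct ToyObject {
  std::vector<ToyObject> nested;  // object-valued properties/elements only
};

// max_depth bounds nesting; *max_properties is a single budget decremented
// across the entire graph, as in IsFastLiteralHelper.
bool IsFastLiteral(const ToyObject& o, int max_depth, int* max_properties) {
  if (max_depth == 0) return false;
  for (const ToyObject& value : o.nested) {
    if ((*max_properties)-- == 0) return false;
    if (!IsFastLiteral(value, max_depth - 1, max_properties)) return false;
  }
  return true;
}

int main() {
  ToyObject boilerplate{{ToyObject{}, ToyObject{{ToyObject{}}}}};
  int budget = 8;
  std::printf("fast literal: %d\n", IsFastLiteral(boilerplate, 3, &budget));
}
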
@@ -48,6 +535,8 @@ StringRef ObjectRef::TypeOf() const {
Object::TypeOf(broker()->isolate(), object<Object>()));
}
+Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
+
base::Optional<ContextRef> ContextRef::previous() const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
@@ -63,7 +552,84 @@ ObjectRef ContextRef::get(int index) const {
return ObjectRef(broker(), value);
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate) : isolate_(isolate) {}
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ zone_(zone),
+ refs_(zone),
+ mode_(FLAG_concurrent_compiler_frontend ? kSerializing : kDisabled) {
+ Trace("%s", "Constructing heap broker.\n");
+}
+
+void JSHeapBroker::Trace(const char* format, ...) const {
+ if (FLAG_trace_heap_broker) {
+ PrintF("[%p] ", this);
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+bool JSHeapBroker::SerializingAllowed() const {
+ return mode() == kSerializing ||
+ (!FLAG_strict_heap_broker && mode() == kSerialized);
+}
+
+void JSHeapBroker::SerializeStandardObjects() {
+ Trace("Serializing standard objects.\n");
+
+ Builtins* const b = isolate()->builtins();
+ Factory* const f = isolate()->factory();
+
+ // Stuff used by JSGraph:
+ GetOrCreateData(f->empty_fixed_array());
+
+ // Stuff used by JSCreateLowering:
+ GetOrCreateData(f->block_context_map());
+ GetOrCreateData(f->catch_context_map());
+ GetOrCreateData(f->eval_context_map());
+ GetOrCreateData(f->fixed_array_map());
+ GetOrCreateData(f->fixed_double_array_map());
+ GetOrCreateData(f->function_context_map());
+ GetOrCreateData(f->many_closures_cell_map());
+ GetOrCreateData(f->mutable_heap_number_map());
+ GetOrCreateData(f->name_dictionary_map());
+ GetOrCreateData(f->one_pointer_filler_map());
+ GetOrCreateData(f->sloppy_arguments_elements_map());
+ GetOrCreateData(f->with_context_map());
+
+ // Stuff used by TypedOptimization:
+ // Strings produced by typeof:
+ GetOrCreateData(f->boolean_string());
+ GetOrCreateData(f->number_string());
+ GetOrCreateData(f->string_string());
+ GetOrCreateData(f->bigint_string());
+ GetOrCreateData(f->symbol_string());
+ GetOrCreateData(f->undefined_string());
+ GetOrCreateData(f->object_string());
+ GetOrCreateData(f->function_string());
+
+ // Stuff used by JSTypedLowering:
+ GetOrCreateData(f->length_string());
+ Builtins::Name builtins[] = {
+ Builtins::kArgumentsAdaptorTrampoline,
+ Builtins::kCallFunctionForwardVarargs,
+ Builtins::kStringAdd_CheckNone_NotTenured,
+ Builtins::kStringAdd_CheckNone_Tenured,
+ Builtins::kStringAdd_ConvertLeft_NotTenured,
+ Builtins::kStringAdd_ConvertRight_NotTenured,
+ };
+ for (auto id : builtins) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ for (int32_t id = 0; id < Builtins::builtin_count; ++id) {
+ if (Builtins::KindOf(id) == Builtins::TFJ) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ }
+
+ Trace("Finished serializing standard objects.\n");
+}
HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
AllowHandleDereference allow_handle_dereference;
@@ -95,28 +661,58 @@ HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
return HeapObjectType(map->instance_type(), flags, oddball_type);
}
-// static
-base::Optional<int> JSHeapBroker::TryGetSmi(Handle<Object> object) {
- AllowHandleDereference allow_handle_dereference;
- if (!object->IsSmi()) return base::Optional<int>();
- return Smi::cast(*object)->value();
+ObjectData* JSHeapBroker::GetData(Handle<Object> object) const {
+ auto it = refs_.find(object.address());
+ return it != refs_.end() ? it->second : nullptr;
+}
+
+ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
+ CHECK(SerializingAllowed());
+ ObjectData* data = GetData(object);
+ if (data == nullptr) {
+ // TODO(neis): Remove these Allow* once we serialize everything upfront.
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ data = ObjectData::Serialize(this, object);
+ }
+ CHECK_NOT_NULL(data);
+ return data;
}
-#define DEFINE_IS_AND_AS(Name) \
- bool ObjectRef::Is##Name() const { \
- AllowHandleDereference allow_handle_dereference; \
- return object<Object>()->Is##Name(); \
- } \
- Name##Ref ObjectRef::As##Name() const { \
- DCHECK(Is##Name()); \
- return Name##Ref(broker(), object<HeapObject>()); \
+void JSHeapBroker::AddData(Handle<Object> object, ObjectData* data) {
+ Trace("Creating data %p for handle %" V8PRIuPTR " (", data, object.address());
+ if (FLAG_trace_heap_broker) {
+ object->ShortPrint();
+ PrintF(")\n");
+ }
+ CHECK_NOT_NULL(isolate()->handle_scope_data()->canonical_scope);
+ CHECK(refs_.insert({object.address(), data}).second);
+}
+
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
+ Name##Ref ObjectRef::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return Name##Ref(data()); \
}
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
+bool ObjectRef::IsSmi() const { return data()->is_smi; }
+
+int ObjectRef::AsSmi() const {
+ DCHECK(IsSmi());
+ // Handle-dereference is always allowed for Handle<Smi>.
+ return object<Smi>()->value();
+}
+
HeapObjectType HeapObjectRef::type() const {
- AllowHandleDereference allow_handle_dereference;
- return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
+ } else {
+ return data()->AsHeapObject()->type;
+ }
}
base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
@@ -131,75 +727,31 @@ base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
}
}
-bool HeapObjectRef::IsSeqString() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapObject>()->IsSeqString();
-}
-
-bool HeapObjectRef::IsExternalString() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapObject>()->IsExternalString();
-}
-
-bool JSFunctionRef::HasBuiltinFunctionId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->shared()->HasBuiltinFunctionId();
-}
-
-BuiltinFunctionId JSFunctionRef::GetBuiltinFunctionId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->shared()->builtin_function_id();
-}
-
-bool JSFunctionRef::IsConstructor() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->IsConstructor();
-}
-
-void JSFunctionRef::EnsureHasInitialMap() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- AllowHeapAllocation heap_allocation;
- // TODO(jarin) Eventually, we will prepare initial maps for resumable
- // functions (i.e., generators).
- DCHECK(IsResumableFunction(object<JSFunction>()->shared()->kind()));
- JSFunction::EnsureHasInitialMap(object<JSFunction>());
-}
-
-SlackTrackingResult JSFunctionRef::FinishSlackTracking() const {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation handle_allocation;
- object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
- int instance_size = object<JSFunction>()->initial_map()->instance_size();
- int inobject_property_count =
- object<JSFunction>()->initial_map()->GetInObjectProperties();
- return SlackTrackingResult(instance_size, inobject_property_count);
-}
-
-bool JSFunctionRef::has_initial_map() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->has_initial_map();
+base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHeapAllocation heap_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), Map::AsElementsKind(broker()->isolate(),
+ object<Map>(), kind));
+ } else {
+ if (kind == elements_kind()) return *this;
+ const ZoneVector<MapData*>& elements_kind_generalizations =
+ data()->AsMap()->elements_kind_generalizations();
+ for (auto data : elements_kind_generalizations) {
+ MapRef map(data);
+ if (map.elements_kind() == kind) return map;
+ }
+ return base::Optional<MapRef>();
+ }
}
-MapRef JSFunctionRef::initial_map() const {
- AllowHandleAllocation handle_allocation;
+int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<JSFunction>()->initial_map(),
- broker()->isolate()));
-}
-
-SharedFunctionInfoRef JSFunctionRef::shared() const {
AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return SharedFunctionInfoRef(
- broker(), handle(object<JSFunction>()->shared(), broker()->isolate()));
-}
-JSGlobalProxyRef JSFunctionRef::global_proxy() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return JSGlobalProxyRef(broker(), handle(object<JSFunction>()->global_proxy(),
- broker()->isolate()));
+ return object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
+ broker()->isolate());
}
base::Optional<ScriptContextTableRef::LookupResult>
@@ -221,50 +773,20 @@ ScriptContextTableRef::lookup(const NameRef& name) const {
return result;
}
-ScriptContextTableRef NativeContextRef::script_context_table() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return ScriptContextTableRef(
- broker(),
- handle(object<Context>()->script_context_table(), broker()->isolate()));
-}
-
OddballType ObjectRef::oddball_type() const {
return IsSmi() ? OddballType::kNone : AsHeapObject().type().oddball_type();
}
ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
- broker()->isolate());
- return ObjectRef(broker(), value);
-}
-
-JSObjectRef AllocationSiteRef::boilerplate() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<JSObject> value(object<AllocationSite>()->boilerplate(),
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
broker()->isolate());
- return JSObjectRef(broker(), value);
-}
-
-ObjectRef AllocationSiteRef::nested_site() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> obj(object<AllocationSite>()->nested_site(),
- broker()->isolate());
- return ObjectRef(broker(), obj);
-}
-
-bool AllocationSiteRef::PointsToLiteral() const {
- AllowHandleDereference handle_dereference;
- return object<AllocationSite>()->PointsToLiteral();
-}
-
-ElementsKind AllocationSiteRef::GetElementsKind() const {
- AllowHandleDereference handle_dereference;
- return object<AllocationSite>()->GetElementsKind();
+ return ObjectRef(broker(), value);
+ }
+ int i = FeedbackVector::GetIndex(slot);
+ return ObjectRef(data()->AsFeedbackVector()->feedback().at(i));
}
bool JSObjectRef::IsUnboxedDoubleField(FieldIndex index) const {
@@ -285,114 +807,18 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
broker()->isolate()));
}
-ElementsKind JSObjectRef::GetElementsKind() {
- AllowHandleDereference handle_dereference;
- return object<JSObject>()->GetElementsKind();
-}
-
-FixedArrayBaseRef JSObjectRef::elements() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return FixedArrayBaseRef(
- broker(), handle(object<JSObject>()->elements(), broker()->isolate()));
-}
-
-namespace {
-
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
- int* max_properties) {
- DCHECK_GE(max_depth, 0);
- DCHECK_GE(*max_properties, 0);
-
- // Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
-
- // Check for too deep nesting.
- if (max_depth == 0) return false;
-
- // Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
- Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
- if (elements->length() > 0 &&
- elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
- if (boilerplate->HasSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1,
- max_properties)) {
- return false;
- }
- }
- }
- } else if (boilerplate->HasDoubleElements()) {
- if (elements->Size() > kMaxRegularHeapObjectSize) return false;
- } else {
- return false;
- }
- }
-
- // TODO(turbofan): Do we want to support out-of-object properties?
- if (!(boilerplate->HasFastProperties() &&
- boilerplate->property_array()->length() == 0)) {
- return false;
- }
-
- // Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors(), isolate);
- int limit = boilerplate->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- if ((*max_properties)-- == 0) return false;
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
- return false;
- }
- }
- }
- return true;
-}
-
-} // namespace
-
-// Maximum depth and total number of elements and properties for literal
-// graphs to be considered for fast deep-copying. The limit is chosen to
-// match the maximum number of inobject properties, to ensure that the
-// performance of using object literals is not worse than using constructor
-// functions, see crbug.com/v8/6211 for details.
-const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
bool AllocationSiteRef::IsFastLiteral() const {
- AllowHandleAllocation allow_handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- int max_properties = kMaxFastLiteralProperties;
- Handle<JSObject> boilerplate(object<AllocationSite>()->boilerplate(),
- broker()->isolate());
- return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
- &max_properties);
-}
-
-PretenureFlag AllocationSiteRef::GetPretenureMode() const {
- AllowHandleDereference allow_handle_dereference;
- return object<AllocationSite>()->GetPretenureMode();
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHeapAllocation
+ allow_heap_allocation; // This is needed for TryMigrateInstance.
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return IsInlinableFastLiteral(
+ handle(object<AllocationSite>()->boilerplate(), broker()->isolate()));
+ } else {
+ return data()->AsAllocationSite()->boilerplate != nullptr;
+ }
}
void JSObjectRef::EnsureElementsTenured() {
@@ -400,43 +826,20 @@ void JSObjectRef::EnsureElementsTenured() {
// the compilation job starts.
AllowHandleAllocation allow_handle_allocation;
AllowHandleDereference allow_handle_dereference;
+ AllowHeapAllocation allow_heap_allocation;
+
Handle<FixedArrayBase> object_elements = elements().object<FixedArrayBase>();
if (Heap::InNewSpace(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
// old-to-new-space pointers (overflowing the store buffer).
- object_elements = Handle<FixedArrayBase>(
+ object_elements =
broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(object_elements)));
+ Handle<FixedArray>::cast(object_elements));
object<JSObject>()->set_elements(*object_elements);
}
}
-ElementsKind MapRef::elements_kind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->elements_kind();
-}
-
-bool MapRef::is_deprecated() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_deprecated();
-}
-
-bool MapRef::CanBeDeprecated() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->CanBeDeprecated();
-}
-
-int MapRef::GetInObjectProperties() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->GetInObjectProperties();
-}
-
-int MapRef::NumberOfOwnDescriptors() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->NumberOfOwnDescriptors();
-}
-
FieldIndex MapRef::GetFieldIndexFor(int i) const {
AllowHandleDereference allow_handle_dereference;
return FieldIndex::ForDescriptor(*object<Map>(), i);
@@ -447,28 +850,6 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
return object<Map>()->GetInObjectPropertyOffset(i);
}
-bool MapRef::is_dictionary_map() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_dictionary_map();
-}
-
-ObjectRef MapRef::constructor_or_backpointer() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(), handle(object<Map>()->constructor_or_backpointer(),
- broker()->isolate()));
-}
-
-int MapRef::instance_size() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_size();
-}
-
-InstanceType MapRef::instance_type() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_type();
-}
-
PropertyDetails MapRef::GetPropertyDetails(int i) const {
AllowHandleDereference allow_handle_dereference;
return object<Map>()->instance_descriptors()->GetDetails(i);
@@ -482,37 +863,12 @@ NameRef MapRef::GetPropertyKey(int i) const {
broker()->isolate()));
}
-bool MapRef::IsJSArrayMap() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->IsJSArrayMap();
-}
-
-bool MapRef::IsInobjectSlackTrackingInProgress() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->IsInobjectSlackTrackingInProgress();
-}
-
bool MapRef::IsFixedCowArrayMap() const {
AllowHandleDereference allow_handle_dereference;
return *object<Map>() ==
ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map();
}
-bool MapRef::has_prototype_slot() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->has_prototype_slot();
-}
-
-bool MapRef::is_stable() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_stable();
-}
-
-bool MapRef::CanTransition() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->CanTransition();
-}
-
MapRef MapRef::FindFieldOwner(int descriptor) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -522,86 +878,36 @@ MapRef MapRef::FindFieldOwner(int descriptor) const {
return MapRef(broker(), owner);
}
-FieldTypeRef MapRef::GetFieldType(int descriptor) const {
+ObjectRef MapRef::GetFieldType(int descriptor) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
Handle<FieldType> field_type(
object<Map>()->instance_descriptors()->GetFieldType(descriptor),
broker()->isolate());
- return FieldTypeRef(broker(), field_type);
-}
-
-ElementsKind JSArrayRef::GetElementsKind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSArray>()->GetElementsKind();
-}
-
-ObjectRef JSArrayRef::length() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSArray>()->length(), broker()->isolate()));
-}
-
-int StringRef::length() const {
- AllowHandleDereference allow_handle_dereference;
- return object<String>()->length();
+ return ObjectRef(broker(), field_type);
}
uint16_t StringRef::GetFirstChar() {
- AllowHandleDereference allow_handle_dereference;
- return object<String>()->Get(0);
-}
-
-double StringRef::ToNumber() {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation allow_handle_allocation;
- AllowHeapAllocation allow_heap_allocation;
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return StringToDouble(broker()->isolate(),
- broker()->isolate()->unicode_cache(), object<String>(),
- flags);
-}
-
-ObjectRef JSRegExpRef::raw_properties_or_hash() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->raw_properties_or_hash(),
- broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::data() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->data(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::source() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->source(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::flags() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->flags(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::last_index() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(
- broker(), handle(object<JSRegExp>()->last_index(), broker()->isolate()));
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<String>()->Get(0);
+ } else {
+ return data()->AsString()->first_char;
+ }
}
-int FixedArrayBaseRef::length() const {
- AllowHandleDereference allow_handle_dereference;
- return object<FixedArrayBase>()->length();
+base::Optional<double> StringRef::ToNumber() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ return StringToDouble(broker()->isolate(),
+ broker()->isolate()->unicode_cache(),
+ object<String>(), flags);
+ } else {
+ return data()->AsString()->to_number;
+ }
}
bool FixedArrayRef::is_the_hole(int i) const {
@@ -626,166 +932,139 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return object<FixedDoubleArray>()->get_scalar(i);
}
-int ScopeInfoRef::ContextLength() const {
- AllowHandleDereference allow_handle_dereference;
- return object<ScopeInfo>()->ContextLength();
-}
+#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return object<holder>()->name(); \
+ }
-int SharedFunctionInfoRef::internal_formal_parameter_count() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->internal_formal_parameter_count();
-}
+// Macros for defining a const getter that, depending on the broker mode,
+// either looks into the handle or into the serialized data. The first one is
+// used for the rare case of an XYZRef class that does not have a corresponding
+// XYZ class in objects.h. The second one is used otherwise.
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return result##Ref( \
+ broker(), handle(object<holder>()->name(), broker()->isolate())); \
+ } else { \
+ return result##Ref(data()->As##holder()->name); \
+ } \
+ }
-int SharedFunctionInfoRef::function_map_index() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->function_map_index();
-}
+// Like BIMODAL_ACCESSOR except that the result type is not an XYZRef.
+#define BIMODAL_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return data()->As##holder()->name; \
+ }
-bool SharedFunctionInfoRef::has_duplicate_parameters() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->has_duplicate_parameters();
-}
+// Like BIMODAL_ACCESSOR_C but for BitFields.
+#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return BitField::decode(data()->As##holder()->field); \
+ }
-FunctionKind SharedFunctionInfoRef::kind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->kind();
-}
+// Macros for defining a const getter that always looks into the handle.
+// (These will go away once we serialize everything.) The first one is used for
+// the rare case of an XYZRef class that does not have a corresponding XYZ class
+// in objects.h. The second one is used otherwise.
+#define HANDLE_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return result##Ref(broker(), \
+ handle(object<holder>()->name(), broker()->isolate())); \
+ }
-LanguageMode SharedFunctionInfoRef::language_mode() {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->language_mode();
-}
+// Like HANDLE_ACCESSOR except that the result type is not an XYZRef.
+#define HANDLE_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return object<holder>()->name(); \
+ }
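For orientation, here is roughly what one instantiation of the bimodal macro yields; this is a mechanical expansion of BIMODAL_ACCESSOR(AllocationSite, Object, nested_site) from below, shown for illustration only and not additional code in the patch:

    ObjectRef AllocationSiteRef::nested_site() const {
      if (broker()->mode() == JSHeapBroker::kDisabled) {
        AllowHandleAllocation handle_allocation;
        AllowHandleDereference allow_handle_dereference;
        // Broker disabled: read straight from the heap via the handle.
        return ObjectRef(broker(),
                         handle(object<AllocationSite>()->nested_site(),
                                broker()->isolate()));
      } else {
        // Broker enabled: answer from the serialized AllocationSiteData.
        return ObjectRef(data()->AsAllocationSite()->nested_site);
      }
    }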
-bool SharedFunctionInfoRef::native() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->native();
-}
+BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
+BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
+BIMODAL_ACCESSOR_C(AllocationSite, PretenureFlag, GetPretenureMode)
-bool SharedFunctionInfoRef::HasBreakInfo() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBreakInfo();
-}
+BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
-bool SharedFunctionInfoRef::HasBuiltinId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBuiltinId();
-}
+BIMODAL_ACCESSOR_C(FixedArrayBase, int, length)
-int SharedFunctionInfoRef::builtin_id() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->builtin_id();
-}
+BIMODAL_ACCESSOR(HeapObject, Map, map)
+HANDLE_ACCESSOR_C(HeapObject, bool, IsExternalString)
+HANDLE_ACCESSOR_C(HeapObject, bool, IsSeqString)
-bool SharedFunctionInfoRef::construct_as_builtin() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->construct_as_builtin();
-}
+HANDLE_ACCESSOR_C(HeapNumber, double, value)
-bool SharedFunctionInfoRef::HasBytecodeArray() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBytecodeArray();
-}
+HANDLE_ACCESSOR(JSArray, Object, length)
-int SharedFunctionInfoRef::GetBytecodeArrayRegisterCount() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->GetBytecodeArray()->register_count();
-}
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
+BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
+BIMODAL_ACCESSOR(JSFunction, Object, prototype)
+HANDLE_ACCESSOR_C(JSFunction, bool, IsConstructor)
+HANDLE_ACCESSOR(JSFunction, JSGlobalProxy, global_proxy)
+HANDLE_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-MapRef NativeContextRef::fast_aliased_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->fast_aliased_arguments_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(JSObject, FixedArrayBase, elements)
-MapRef NativeContextRef::sloppy_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->sloppy_arguments_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(JSRegExp, Object, data)
+HANDLE_ACCESSOR(JSRegExp, Object, flags)
+HANDLE_ACCESSOR(JSRegExp, Object, last_index)
+HANDLE_ACCESSOR(JSRegExp, Object, raw_properties_or_hash)
+HANDLE_ACCESSOR(JSRegExp, Object, source)
-MapRef NativeContextRef::strict_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->strict_arguments_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_C(Map, int, instance_size)
+HANDLE_ACCESSOR_C(Map, bool, CanBeDeprecated)
+HANDLE_ACCESSOR_C(Map, bool, CanTransition)
+HANDLE_ACCESSOR_C(Map, bool, IsInobjectSlackTrackingInProgress)
+HANDLE_ACCESSOR_C(Map, bool, IsJSArrayMap)
+HANDLE_ACCESSOR_C(Map, bool, is_stable)
+HANDLE_ACCESSOR_C(Map, InstanceType, instance_type)
+HANDLE_ACCESSOR_C(Map, int, GetInObjectProperties)
+HANDLE_ACCESSOR_C(Map, int, GetInObjectPropertiesStartInWords)
+HANDLE_ACCESSOR_C(Map, int, NumberOfOwnDescriptors)
+HANDLE_ACCESSOR(Map, Object, constructor_or_backpointer)
-MapRef NativeContextRef::js_array_fast_elements_map_index() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->js_array_fast_elements_map_index(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR_C(MutableHeapNumber, double, value)
-MapRef NativeContextRef::initial_array_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->initial_array_iterator_map(),
- broker()->isolate()));
-}
+#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR(NativeContext, type, name)
+BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
+#undef DEF_NATIVE_CONTEXT_ACCESSOR
-MapRef NativeContextRef::set_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->set_value_iterator_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(PropertyCell, Object, value)
+HANDLE_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-MapRef NativeContextRef::set_key_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->set_key_value_iterator_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR_C(ScopeInfo, int, ContextLength)
-MapRef NativeContextRef::map_key_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->map_key_iterator_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
+BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
+#define DEF_SFI_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR_C(SharedFunctionInfo, type, name)
+BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
+#undef DEF_SFI_ACCESSOR
-MapRef NativeContextRef::map_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->map_value_iterator_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_C(String, int, length)
-MapRef NativeContextRef::map_key_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->map_key_value_iterator_map(),
- broker()->isolate()));
-}
+// TODO(neis): Provide StringShape() on StringRef.
-MapRef NativeContextRef::iterator_result_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->iterator_result_map(),
- broker()->isolate()));
-}
-
-MapRef NativeContextRef::string_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->string_iterator_map(),
- broker()->isolate()));
-}
-
-MapRef NativeContextRef::promise_function_initial_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->promise_function()->initial_map(),
- broker()->isolate()));
+bool JSFunctionRef::has_initial_map() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(JSFunction, has_initial_map);
+ return data()->AsJSFunction()->initial_map != nullptr;
}
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
@@ -794,6 +1073,25 @@ MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
return get(index).AsMap();
}
+MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS:
+ return js_array_packed_smi_elements_map();
+ case HOLEY_SMI_ELEMENTS:
+ return js_array_holey_smi_elements_map();
+ case PACKED_DOUBLE_ELEMENTS:
+ return js_array_packed_double_elements_map();
+ case HOLEY_DOUBLE_ELEMENTS:
+ return js_array_holey_double_elements_map();
+ case PACKED_ELEMENTS:
+ return js_array_packed_elements_map();
+ case HOLEY_ELEMENTS:
+ return js_array_holey_elements_map();
+ default:
+ UNREACHABLE();
+ }
+}
+
bool ObjectRef::BooleanValue() {
AllowHandleDereference allow_handle_dereference;
return object<Object>()->BooleanValue(broker()->isolate());
@@ -831,18 +1129,70 @@ CellRef ModuleRef::GetCell(int cell_index) {
broker()->isolate()));
}
-ObjectRef PropertyCellRef::value() const {
- AllowHandleAllocation allow_handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(
- broker(), handle(object<PropertyCell>()->value(), broker()->isolate()));
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) {
+ switch (broker->mode()) {
+ case JSHeapBroker::kSerialized:
+ data_ = FLAG_strict_heap_broker ? broker->GetData(object)
+ : broker->GetOrCreateData(object);
+ break;
+ case JSHeapBroker::kSerializing:
+ data_ = broker->GetOrCreateData(object);
+ break;
+ case JSHeapBroker::kDisabled:
+ data_ = broker->GetData(object);
+ if (data_ == nullptr) {
+ AllowHandleDereference handle_dereference;
+ data_ =
+ new (broker->zone()) ObjectData(broker, object, object->IsSmi());
+ }
+ break;
+ }
+ CHECK_NOT_NULL(data_);
}
-PropertyDetails PropertyCellRef::property_details() const {
- AllowHandleDereference allow_handle_dereference;
- return object<PropertyCell>()->property_details();
+base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return JSObjectRef(broker(), handle(object<AllocationSite>()->boilerplate(),
+ broker()->isolate()));
+ } else {
+ JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate;
+ if (boilerplate) {
+ return JSObjectRef(boilerplate);
+ } else {
+ return base::nullopt;
+ }
+ }
+}
+
+ElementsKind JSObjectRef::GetElementsKind() const {
+ return map().elements_kind();
}
+Handle<Object> ObjectRef::object() const { return data_->object; }
+
+JSHeapBroker* ObjectRef::broker() const { return data_->broker; }
+
+ObjectData* ObjectRef::data() const { return data_; }
+
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line) {
+ if (FLAG_trace_heap_broker) {
+ PrintF("[%p] Skipping optimization in %s at line %d due to missing data\n",
+ broker, function, line);
+ }
+ return AdvancedReducer::NoChange();
+}
+
+#undef BIMODAL_ACCESSOR
+#undef BIMODAL_ACCESSOR_B
+#undef BIMODAL_ACCESSOR_C
+#undef GET_OR_CREATE
+#undef HANDLE_ACCESSOR
+#undef HANDLE_ACCESSOR_C
+#undef IF_BROKER_DISABLED_ACCESS_HANDLE_C
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 8503e82d12..7ea12ee733 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -9,18 +9,12 @@
#include "src/base/optional.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class DisallowHeapAccess {
- DisallowHeapAllocation no_heap_allocation_;
- DisallowHandleAllocation no_handle_allocation_;
- DisallowHandleDereference no_handle_dereference_;
- DisallowCodeDependencyChange no_dependency_change_;
-};
-
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
@@ -31,6 +25,7 @@ enum class OddballType : uint8_t {
kOther // Oddball, but none of the above.
};
+// TODO(neis): Get rid of the HeapObjectType class.
class HeapObjectType {
public:
enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
@@ -59,49 +54,63 @@ class HeapObjectType {
Flags const flags_;
};
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ /* Subtypes of HeapObject */ \
V(AllocationSite) \
V(Cell) \
V(Code) \
- V(Context) \
V(FeedbackVector) \
- V(FixedArray) \
+ V(Map) \
+ V(Module) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(SharedFunctionInfo) \
+ V(Context) \
V(FixedArrayBase) \
- V(FixedDoubleArray) \
V(HeapNumber) \
- V(HeapObject) \
- V(InternalizedString) \
- V(JSArray) \
- V(JSFunction) \
- V(JSGlobalProxy) \
V(JSObject) \
- V(JSRegExp) \
- V(Map) \
- V(Module) \
V(MutableHeapNumber) \
V(Name) \
- V(NativeContext) \
V(PropertyCell) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- V(SharedFunctionInfo) \
- V(String)
+ /* Subtypes of Object */ \
+ V(HeapObject)
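The ordering constraint exists because ObjectData::Serialize (see RETURN_CREATE_DATA_IF_MATCH in js-heap-broker.cc above) tries these entries top to bottom and constructs the first matching XYZData. As a mechanical illustration, the V(JSArray) entry expands to roughly:

    if (object->IsJSArray()) {
      return new (broker->zone())
          JSArrayData(broker, Handle<JSArray>::cast(object), type);
    }

so listing a supertype such as JSObject or HeapObject earlier would shadow every one of its subtypes.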
class CompilationDependencies;
class JSHeapBroker;
+class ObjectData;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
class ObjectRef {
public:
- explicit ObjectRef(const JSHeapBroker* broker, Handle<Object> object)
- : broker_(broker), object_(object) {}
+ ObjectRef(JSHeapBroker* broker, Handle<Object> object);
+ explicit ObjectRef(ObjectData* data) : data_(data) { CHECK_NOT_NULL(data_); }
+
+ bool equals(const ObjectRef& other) const;
+ Handle<Object> object() const;
+ // TODO(neis): Remove eventually.
template <typename T>
Handle<T> object() const {
AllowHandleDereference handle_dereference;
- return Handle<T>::cast(object_);
+ return Handle<T>::cast(object());
}
OddballType oddball_type() const;
@@ -109,8 +118,6 @@ class ObjectRef {
bool IsSmi() const;
int AsSmi() const;
- bool equals(const ObjectRef& other) const;
-
#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
@@ -123,17 +130,14 @@ class ObjectRef {
bool BooleanValue();
double OddballToNumber() const;
+ Isolate* isolate() const;
+
protected:
- const JSHeapBroker* broker() const { return broker_; }
+ JSHeapBroker* broker() const;
+ ObjectData* data() const;
private:
- const JSHeapBroker* broker_;
- Handle<Object> object_;
-};
-
-class FieldTypeRef : public ObjectRef {
- public:
- using ObjectRef::ObjectRef;
+ ObjectData* data_;
};
class HeapObjectRef : public ObjectRef {
@@ -165,30 +169,22 @@ class JSObjectRef : public HeapObjectRef {
FixedArrayBaseRef elements() const;
void EnsureElementsTenured();
- ElementsKind GetElementsKind();
-};
-
-struct SlackTrackingResult {
- SlackTrackingResult(int instance_sizex, int inobject_property_countx)
- : instance_size(instance_sizex),
- inobject_property_count(inobject_property_countx) {}
- int instance_size;
- int inobject_property_count;
+ ElementsKind GetElementsKind() const;
};
class JSFunctionRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
- bool HasBuiltinFunctionId() const;
- BuiltinFunctionId GetBuiltinFunctionId() const;
bool IsConstructor() const;
bool has_initial_map() const;
MapRef initial_map() const;
+ bool has_prototype() const;
+ ObjectRef prototype() const;
+ bool PrototypeRequiresRuntimeLookup() const;
JSGlobalProxyRef global_proxy() const;
- SlackTrackingResult FinishSlackTracking() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
SharedFunctionInfoRef shared() const;
- void EnsureHasInitialMap() const;
};
class JSRegExpRef : public JSObjectRef {
@@ -224,27 +220,39 @@ class ContextRef : public HeapObjectRef {
ObjectRef get(int index) const;
};
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, strict_arguments_map) \
+ V(Map, string_iterator_map) \
+ V(ScriptContextTable, script_context_table)
+
class NativeContextRef : public ContextRef {
public:
using ContextRef::ContextRef;
- ScriptContextTableRef script_context_table() const;
-
- MapRef fast_aliased_arguments_map() const;
- MapRef sloppy_arguments_map() const;
- MapRef strict_arguments_map() const;
- MapRef js_array_fast_elements_map_index() const;
- MapRef initial_array_iterator_map() const;
- MapRef set_value_iterator_map() const;
- MapRef set_key_value_iterator_map() const;
- MapRef map_key_iterator_map() const;
- MapRef map_value_iterator_map() const;
- MapRef map_key_value_iterator_map() const;
- MapRef iterator_result_map() const;
- MapRef string_iterator_map() const;
- MapRef promise_function_initial_map() const;
+#define DECL_ACCESSOR(type, name) type##Ref name() const;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
MapRef GetFunctionMapFromIndex(int index) const;
+ MapRef GetInitialJSArrayMap(ElementsKind kind) const;
};
class NameRef : public HeapObjectRef {
@@ -276,12 +284,21 @@ class AllocationSiteRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- JSObjectRef boilerplate() const;
+ bool PointsToLiteral() const;
PretenureFlag GetPretenureMode() const;
- bool IsFastLiteral() const;
ObjectRef nested_site() const;
- bool PointsToLiteral() const;
+
+ // {IsFastLiteral} determines whether the given array or object literal
+ // boilerplate satisfies all limits to be considered for fast deep-copying
+ // and computes the total size of all objects that are part of the graph.
+ //
+ // If PointsToLiteral() is false, then IsFastLiteral() is also false.
+ bool IsFastLiteral() const;
+ // We only serialize boilerplate if IsFastLiteral is true.
+ base::Optional<JSObjectRef> boilerplate() const;
+
ElementsKind GetElementsKind() const;
+ bool CanInlineCall() const;
};
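A hedged sketch of the caller-side pattern these accessors imply (the reducer context and variable names are assumptions, not part of this patch):

    // Inside some reducer that holds an AllocationSiteRef `site`:
    if (!site.IsFastLiteral()) return NoChange();     // implies PointsToLiteral()
    base::Optional<JSObjectRef> boilerplate = site.boilerplate();
    if (!boilerplate.has_value()) return NoChange();   // boilerplate not serialized
    ElementsKind kind = boilerplate->GetElementsKind();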
class MapRef : public HeapObjectRef {
@@ -291,26 +308,30 @@ class MapRef : public HeapObjectRef {
int instance_size() const;
InstanceType instance_type() const;
int GetInObjectProperties() const;
+ int GetInObjectPropertiesStartInWords() const;
int NumberOfOwnDescriptors() const;
- PropertyDetails GetPropertyDetails(int i) const;
- NameRef GetPropertyKey(int i) const;
- FieldIndex GetFieldIndexFor(int i) const;
int GetInObjectPropertyOffset(int index) const;
ElementsKind elements_kind() const;
- ObjectRef constructor_or_backpointer() const;
bool is_stable() const;
bool has_prototype_slot() const;
bool is_deprecated() const;
bool CanBeDeprecated() const;
bool CanTransition() const;
bool IsInobjectSlackTrackingInProgress() const;
- MapRef FindFieldOwner(int descriptor) const;
bool is_dictionary_map() const;
bool IsJSArrayMap() const;
bool IsFixedCowArrayMap() const;
+ ObjectRef constructor_or_backpointer() const;
+
+ base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+
// Concerning the underlying instance_descriptors:
- FieldTypeRef GetFieldType(int descriptor) const;
+ MapRef FindFieldOwner(int descriptor) const;
+ PropertyDetails GetPropertyDetails(int i) const;
+ NameRef GetPropertyKey(int i) const;
+ FieldIndex GetFieldIndexFor(int i) const;
+ ObjectRef GetFieldType(int descriptor) const;
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -336,11 +357,17 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
bool is_the_hole(int i) const;
};
+class BytecodeArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+
+ int register_count() const;
+};
+
class JSArrayRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
- ElementsKind GetElementsKind() const;
ObjectRef length() const;
};
@@ -351,22 +378,29 @@ class ScopeInfoRef : public HeapObjectRef {
int ContextLength() const;
};
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinFunctionId) \
+ V(bool, HasBuiltinId) \
+ V(BuiltinFunctionId, builtin_function_id) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray)
+
class SharedFunctionInfoRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- int internal_formal_parameter_count() const;
- bool has_duplicate_parameters() const;
- int function_map_index() const;
- FunctionKind kind() const;
- LanguageMode language_mode();
- bool native() const;
- bool HasBreakInfo() const;
- bool HasBuiltinId() const;
int builtin_id() const;
- bool construct_as_builtin() const;
- bool HasBytecodeArray() const;
- int GetBytecodeArrayRegisterCount() const;
+ BytecodeArrayRef GetBytecodeArray() const;
+#define DECL_ACCESSOR(type, name) type name() const;
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
};
class StringRef : public NameRef {
@@ -375,7 +409,7 @@ class StringRef : public NameRef {
int length() const;
uint16_t GetFirstChar();
- double ToNumber();
+ base::Optional<double> ToNumber();
};
class ModuleRef : public HeapObjectRef {
@@ -407,23 +441,59 @@ class InternalizedStringRef : public StringRef {
class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
public:
- JSHeapBroker(Isolate* isolate);
+ JSHeapBroker(Isolate* isolate, Zone* zone);
+ void SerializeStandardObjects();
HeapObjectType HeapObjectTypeFromMap(Handle<Map> map) const {
AllowHandleDereference handle_dereference;
return HeapObjectTypeFromMap(*map);
}
- static base::Optional<int> TryGetSmi(Handle<Object> object);
-
Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ enum BrokerMode { kDisabled, kSerializing, kSerialized };
+ BrokerMode mode() const { return mode_; }
+ void StopSerializing() {
+ CHECK_EQ(mode_, kSerializing);
+ mode_ = kSerialized;
+ }
+ bool SerializingAllowed() const;
+
+ // Returns nullptr iff handle unknown.
+ ObjectData* GetData(Handle<Object>) const;
+ // Never returns nullptr.
+ ObjectData* GetOrCreateData(Handle<Object>);
+
+ void Trace(const char* format, ...) const;
private:
friend class HeapObjectRef;
+ friend class ObjectRef;
+ friend class ObjectData;
+
+ // TODO(neis): Remove eventually.
HeapObjectType HeapObjectTypeFromMap(Map* map) const;
+ void AddData(Handle<Object> object, ObjectData* data);
+
Isolate* const isolate_;
-};
+ Zone* const zone_;
+ ZoneUnorderedMap<Address, ObjectData*> refs_;
+ BrokerMode mode_;
+};
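A minimal sketch of the broker lifecycle that this interface suggests, assuming the compilation pipeline drives it in this order (the pipeline wiring is not shown in this change; `some_handle` is a placeholder):

    Zone zone(isolate->allocator(), ZONE_NAME);
    JSHeapBroker broker(isolate, &zone);   // kSerializing when the concurrent
                                           // frontend flag is on, else kDisabled
    broker.SerializeStandardObjects();
    ObjectRef ref(&broker, some_handle);   // constructing a ref serializes the
                                           // object's data while kSerializing
    broker.StopSerializing();              // kSerializing -> kSerialized
    // From here on, refs answer from serialized data; with --strict-heap-broker
    // the handle must already be known to GetData().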
+
+#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
+ optionally_something) \
+ auto optionally_something_ = optionally_something; \
+ if (!optionally_something_) \
+ return NoChangeBecauseOfMissingData(js_heap_broker(), __FUNCTION__, \
+ __LINE__); \
+ something_var = *optionally_something_;
+
+class Reduction;
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line);
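A hypothetical use of the macro inside a reducer method; `receiver_map` and `kind` are assumed locals, and the expansion requires the enclosing class to expose js_heap_broker():

    base::Optional<MapRef> maybe_map = receiver_map.AsElementsKind(kind);
    MapRef transition_target = receiver_map;  // overwritten below on success
    ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(transition_target, maybe_map);
    // If maybe_map had no value, the reducer has already returned NoChange();
    // otherwise transition_target now holds *maybe_map.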
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
new file mode 100644
index 0000000000..0bcc662771
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -0,0 +1,85 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-heap-copy-reducer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/js-operator.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects/map.h"
+#include "src/objects/scope-info.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// In the functions below, we call the ObjectRef (or subclass) constructor in
+// order to trigger serialization if not yet done.
+
+JSHeapCopyReducer::JSHeapCopyReducer(JSHeapBroker* broker) : broker_(broker) {}
+
+JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
+
+Reduction JSHeapCopyReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ ObjectRef(broker(), HeapConstantOf(node->op()));
+ break;
+ }
+ case IrOpcode::kJSCreateArray: {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
+ break;
+ }
+ case IrOpcode::kJSCreateCatchContext: {
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ break;
+ }
+ case IrOpcode::kJSCreateClosure: {
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ SharedFunctionInfoRef(broker(), p.shared_info());
+ HeapObjectRef(broker(), p.feedback_cell());
+ HeapObjectRef(broker(), p.code());
+ break;
+ }
+ case IrOpcode::kJSCreateEmptyLiteralArray: {
+ // TODO(neis, jarin) Force serialization of the entire feedback vector
+ // rather than just the one element.
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ FeedbackVectorRef(broker(), p.feedback().vector());
+ Handle<Object> feedback(
+ p.feedback().vector()->Get(p.feedback().slot())->ToObject(),
+ broker()->isolate());
+ ObjectRef(broker(), feedback);
+ break;
+ }
+ case IrOpcode::kJSCreateFunctionContext: {
+ CreateFunctionContextParameters const& p =
+ CreateFunctionContextParametersOf(node->op());
+ ScopeInfoRef(broker(), p.scope_info());
+ break;
+ }
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject: {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ ObjectRef(broker(), p.feedback().vector());
+ break;
+ }
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed: {
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef(broker(), p.name());
+ break;
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
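A rough sketch of how such a reducer is typically run over the graph while the broker is still serializing (GraphReducer usage assumed from src/compiler/graph-reducer.h; the actual pipeline wiring is not part of this file):

    GraphReducer graph_reducer(zone, jsgraph->graph(), jsgraph->Dead());
    JSHeapCopyReducer heap_copy_reducer(broker);
    graph_reducer.AddReducer(&heap_copy_reducer);
    graph_reducer.ReduceGraph();      // visits every node, serializing the heap
                                      // data referenced by its embedded handles
    broker->StopSerializing();        // afterwards the broker goes read-only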
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.h b/deps/v8/src/compiler/js-heap-copy-reducer.h
new file mode 100644
index 0000000000..b94b930d78
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.h
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
+#define V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSHeapBroker;
+
+// The heap copy reducer makes sure that the relevant heap data referenced
+// by handles embedded in the graph is copied to the heap broker.
+// TODO(jarin) This is just a temporary solution until the graph uses only
+// ObjectRef-derived references to refer to the heap data.
+class JSHeapCopyReducer : public Reducer {
+ public:
+ explicit JSHeapCopyReducer(JSHeapBroker* broker);
+
+ const char* reducer_name() const override { return "JSHeapCopyReducer"; }
+
+ Reduction Reduce(Node* node) override;
+
+ private:
+ JSHeapBroker* broker();
+
+ JSHeapBroker* broker_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index dc8d70f6ac..68919c9aec 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -34,7 +34,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
void Finalize() final;
private:
- // This limit currently matches what Crankshaft does. We may want to
+ // This limit currently matches what the old compiler did. We may want to
// re-evaluate and come up with a proper limit for TurboFan.
static const int kMaxCallPolymorphism = 4;
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index fcb9e87adb..194e876849 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -15,6 +15,7 @@
#include "src/compiler/operator-properties.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
namespace v8 {
namespace internal {
@@ -55,10 +56,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSProxy:
return ReduceIsInstanceType(node, JS_PROXY_TYPE);
- case Runtime::kInlineIsJSWeakMap:
- return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
- case Runtime::kInlineIsJSWeakSet:
- return ReduceIsInstanceType(node, JS_WEAK_SET_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -79,19 +76,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
- case Runtime::kInlineGetSuperConstructor:
- return ReduceGetSuperConstructor(node);
- case Runtime::kInlineArrayBufferViewWasNeutered:
- return ReduceArrayBufferViewWasNeutered(node);
- case Runtime::kInlineMaxSmi:
- return ReduceMaxSmi(node);
- case Runtime::kInlineTypedArrayGetLength:
- return ReduceArrayBufferViewField(node,
- AccessBuilder::ForJSTypedArrayLength());
- case Runtime::kInlineTheHole:
- return ReduceTheHole(node);
- case Runtime::kInlineStringMaxLength:
- return ReduceStringMaxLength(node);
default:
break;
}
@@ -321,66 +305,6 @@ Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
- Node* node, FieldAccess const& access) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Load the {receiver}s field.
- Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
- receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), value);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceArrayBufferViewWasNeutered(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* value = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
- Node* value = jsgraph()->Constant(Smi::kMaxValue);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
- Node* value = jsgraph()->TheHoleConstant();
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceStringMaxLength(Node* node) {
- Node* value = jsgraph()->Constant(String::kMaxLength);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 18fe1248c7..e0a55d7b06 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -63,18 +63,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceCall(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
- // TODO(turbofan): typedarray.js support; drop once TypedArrays are
- // converted to proper CodeStubAssembler based builtins.
- Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
- Reduction ReduceArrayBufferViewWasNeutered(Node* node);
- Reduction ReduceMaxSmi(Node* node);
-
- // TODO(turbofan): collection.js support; drop once Maps and Sets are
- // converted to proper CodeStubAssembler based builtins.
- Reduction ReduceTheHole(Node* node);
-
- Reduction ReduceStringMaxLength(Node* node);
-
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
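The helpers removed above implemented a small "load, then neutralize" pattern: read a field of the typed-array view, then substitute zero when the view's backing ArrayBuffer has been neutered. A standalone C++ sketch of that pattern (not V8 code; the struct names below are simplified stand-ins for the real objects):

#include <cstdint>

struct ArrayBuffer { bool was_neutered; };                        // stand-in type
struct TypedArrayView { uint32_t length; ArrayBuffer* buffer; };  // stand-in type

// Mirrors the deleted ReduceArrayBufferViewField lowering: load the field,
// then select zero if the backing buffer was neutered (detached).
uint32_t LoadViewLength(const TypedArrayView& view) {
  uint32_t value = view.length;
  return view.buffer->was_neutered ? 0u : value;
}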
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 4c6ea30bae..e35a860be0 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -5,7 +5,7 @@
#include "src/compiler/js-native-context-specialization.h"
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
@@ -20,6 +20,8 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/templates.h"
#include "src/vector-slot-pair.h"
@@ -58,9 +60,9 @@ struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
};
JSNativeContextSpecialization::JSNativeContextSpecialization(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
- Flags flags, Handle<Context> native_context,
- CompilationDependencies* dependencies, Zone* zone)
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Flags flags,
+ Handle<Context> native_context, CompilationDependencies* dependencies,
+ Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -109,6 +111,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreDataPropertyInLiteral(node);
case IrOpcode::kJSStoreInArrayLiteral:
return ReduceJSStoreInArrayLiteral(node);
+ case IrOpcode::kJSToObject:
+ return ReduceJSToObject(node);
default:
break;
}
@@ -408,10 +412,9 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
if (function->IsConstructor() && function->has_prototype_slot() &&
function->has_instance_prototype() &&
function->prototype()->IsJSReceiver()) {
- // Ensure that the {function} has a valid initial map, so we can
- // depend on that for the prototype constant-folding below.
- JSFunction::EnsureHasInitialMap(function);
-
+ // We need {function}'s initial map so that we can depend on it for the
+ // prototype constant-folding below.
+ if (!function->has_initial_map()) return NoChange();
MapRef initial_map = dependencies()->DependOnInitialMap(
JSFunctionRef(js_heap_broker(), function));
Node* prototype = jsgraph()->Constant(
@@ -1100,19 +1103,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
if (m.Value()->IsJSFunction() &&
p.name().is_identical_to(factory()->prototype_string())) {
// Optimize "prototype" property of functions.
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->IsConstructor()) {
- // We need to add a code dependency on the initial map of the
- // {function} in order to be notified about changes to the
- // "prototype" of {function}.
- JSFunction::EnsureHasInitialMap(function);
- dependencies()->DependOnInitialMap(
- JSFunctionRef(js_heap_broker(), function));
- Handle<Object> prototype(function->prototype(), isolate());
- Node* value = jsgraph()->Constant(prototype);
- ReplaceWithValue(node, value);
- return Replace(value);
+ JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ // TODO(neis): Remove the has_prototype_slot condition once the broker is
+ // always enabled.
+ if (!function.map().has_prototype_slot() || !function.has_prototype() ||
+ function.PrototypeRequiresRuntimeLookup()) {
+ return NoChange();
}
+ ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
+ Node* value = jsgraph()->Constant(prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
} else if (m.Value()->IsString() &&
p.name().is_identical_to(factory()->length_string())) {
// Constant-fold "length" property on constant strings.
@@ -2180,12 +2181,31 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
store_mode);
}
+Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ }
+
+ ReplaceWithValue(node, receiver, effect);
+ return Replace(receiver);
+}
+
namespace {
ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return kExternal##Type##Array;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2221,7 +2241,8 @@ JSNativeContextSpecialization::BuildElementAccess(
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
// Determine the {receiver}s (known) length.
- length = jsgraph()->Constant(typed_array->length_value());
+ length =
+ jsgraph()->Constant(static_cast<double>(typed_array->length_value()));
// Check if the {receiver}s buffer was neutered.
buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
@@ -2474,7 +2495,7 @@ JSNativeContextSpecialization::BuildElementAccess(
if (access_mode == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
- if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
}
@@ -2513,10 +2534,10 @@ JSNativeContextSpecialization::BuildElementAccess(
} else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
// Return the signaling NaN hole directly if all uses are
// truncating.
- vtrue = etrue =
- graph()->NewNode(simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kAllowReturnHole),
- vtrue, etrue, if_true);
+ vtrue = etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kAllowReturnHole, VectorSlotPair()),
+ vtrue, etrue, if_true);
}
}
@@ -2564,7 +2585,8 @@ JSNativeContextSpecialization::BuildElementAccess(
mode = CheckFloat64HoleMode::kAllowReturnHole;
}
value = effect = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode), value, effect, control);
+ simplified()->CheckFloat64Hole(mode, VectorSlotPair()), value,
+ effect, control);
}
}
} else {
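The new ReduceJSToObject reduction above removes the conversion when map inference proves every possible receiver map is already a JSReceiver map. A minimal standalone model of that decision (not V8 code; MapInfo is an illustrative stand-in for the inferred maps):

#include <vector>

struct MapInfo { bool is_js_receiver; };  // stand-in for an inferred map

// True when JSToObject can be replaced by the receiver itself: inference
// succeeded and no possible map requires a wrapper object to be created.
bool CanElideToObject(const std::vector<MapInfo>& receiver_maps) {
  if (receiver_maps.empty()) return false;   // no map information: keep node
  for (const MapInfo& m : receiver_maps) {
    if (!m.is_js_receiver) return false;     // primitive receiver possible
  }
  return true;
}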
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 53fe9e2c11..413e3c191f 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -47,7 +47,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Flags flags,
+ JSHeapBroker* js_heap_broker, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone);
@@ -76,6 +76,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSStoreNamedOwn(Node* node);
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceJSStoreInArrayLiteral(Node* node);
+ Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandles const& receiver_maps,
@@ -217,7 +218,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
CommonOperatorBuilder* common() const;
@@ -231,7 +232,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<JSGlobalObject> global_object_;
Handle<JSGlobalProxy> global_proxy_;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5d45bb7f95..57f9950d55 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -363,7 +363,7 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
bool operator==(CreateArrayParameters const& lhs,
CreateArrayParameters const& rhs) {
return lhs.arity() == rhs.arity() &&
- lhs.site().location() == rhs.site().location();
+ lhs.site().address() == rhs.site().address();
}
@@ -374,13 +374,14 @@ bool operator!=(CreateArrayParameters const& lhs,
size_t hash_value(CreateArrayParameters const& p) {
- return base::hash_combine(p.arity(), p.site().location());
+ return base::hash_combine(p.arity(), p.site().address());
}
std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
os << p.arity();
- if (!p.site().is_null()) os << ", " << Brief(*p.site());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) os << ", " << Brief(*site);
return os;
}
@@ -534,6 +535,29 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
+bool operator==(CloneObjectParameters const& lhs,
+ CloneObjectParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback() && lhs.flags() == rhs.flags();
+}
+
+bool operator!=(CloneObjectParameters const& lhs,
+ CloneObjectParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CloneObjectParameters const& p) {
+ return base::hash_combine(p.feedback(), p.flags());
+}
+
+std::ostream& operator<<(std::ostream& os, CloneObjectParameters const& p) {
+ return os << p.flags();
+}
+
+const CloneObjectParameters& CloneObjectParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCloneObject);
+ return OpParameter<CloneObjectParameters>(op);
+}
+
size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
std::ostream& operator<<(std::ostream& os, ForInMode mode) {
@@ -589,6 +613,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumberConvertBigInt, Operator::kNoProperties, 1, 1) \
V(ToNumeric, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
@@ -1087,8 +1112,8 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
type); // parameter
}
-const Operator* JSOperatorBuilder::CreateArray(size_t arity,
- Handle<AllocationSite> site) {
+const Operator* JSOperatorBuilder::CreateArray(
+ size_t arity, MaybeHandle<AllocationSite> site) {
// constructor, new_target, arg1, ..., argN
int const value_input_count = static_cast<int>(arity) + 2;
CreateArrayParameters parameters(arity, site);
@@ -1179,6 +1204,17 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CloneObject(VectorSlotPair const& feedback,
+ int literal_flags) {
+ CloneObjectParameters parameters(feedback, literal_flags);
+ return new (zone()) Operator1<CloneObjectParameters>( // --
+ IrOpcode::kJSCloneObject, // opcode
+ Operator::kNoProperties, // properties
+ "JSCloneObject", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
return new (zone()) Operator( // --
IrOpcode::kJSCreateEmptyLiteralObject, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index f73aca819f..b10d89cdb9 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/handles.h"
+#include "src/maybe-handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
#include "src/vector-slot-pair.h"
@@ -457,15 +457,15 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
// used as parameter by JSCreateArray operators.
class CreateArrayParameters final {
public:
- explicit CreateArrayParameters(size_t arity, Handle<AllocationSite> site)
+ explicit CreateArrayParameters(size_t arity, MaybeHandle<AllocationSite> site)
: arity_(arity), site_(site) {}
size_t arity() const { return arity_; }
- Handle<AllocationSite> site() const { return site_; }
+ MaybeHandle<AllocationSite> site() const { return site_; }
private:
size_t const arity_;
- Handle<AllocationSite> const site_;
+ MaybeHandle<AllocationSite> const site_;
};
bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
@@ -626,6 +626,28 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+class CloneObjectParameters final {
+ public:
+ CloneObjectParameters(VectorSlotPair const& feedback, int flags)
+ : feedback_(feedback), flags_(flags) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+ int flags() const { return flags_; }
+
+ private:
+ VectorSlotPair const feedback_;
+ int const flags_;
+};
+
+bool operator==(CloneObjectParameters const&, CloneObjectParameters const&);
+bool operator!=(CloneObjectParameters const&, CloneObjectParameters const&);
+
+size_t hash_value(CloneObjectParameters const&);
+
+std::ostream& operator<<(std::ostream&, CloneObjectParameters const&);
+
+const CloneObjectParameters& CloneObjectParametersOf(const Operator* op);
+
// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
@@ -685,13 +707,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ToLength();
const Operator* ToName();
const Operator* ToNumber();
+ const Operator* ToNumberConvertBigInt();
const Operator* ToNumeric();
const Operator* ToObject();
const Operator* ToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
- const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
+ const Operator* CreateArray(size_t arity, MaybeHandle<AllocationSite> site);
const Operator* CreateArrayIterator(IterationKind);
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
@@ -716,6 +739,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
Handle<ObjectBoilerplateDescription> constant,
VectorSlotPair const& feedback, int literal_flags,
int number_of_properties);
+ const Operator* CloneObject(VectorSlotPair const& feedback,
+ int literal_flags);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
VectorSlotPair const& feedback,
int literal_flags);
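CloneObjectParameters above follows the usual operator-parameter pattern: value equality plus a hash over exactly the same fields, so structurally identical JSCloneObject operators can be shared. A self-contained analogue of that pattern (simplified field types; a boost-style combine stands in for base::hash_combine):

#include <cstddef>
#include <functional>

struct CloneParams {
  int feedback_slot;  // stand-in for VectorSlotPair
  int flags;
};

bool operator==(const CloneParams& a, const CloneParams& b) {
  return a.feedback_slot == b.feedback_slot && a.flags == b.flags;
}
bool operator!=(const CloneParams& a, const CloneParams& b) { return !(a == b); }

// Hash over the fields compared by operator==, as operator caching requires.
size_t hash_value(const CloneParams& p) {
  size_t h = std::hash<int>()(p.feedback_slot);
  return h ^ (std::hash<int>()(p.flags) + 0x9e3779b9u + (h << 6) + (h >> 2));
}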
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 4fc1f84538..56b1f224c7 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -17,6 +17,7 @@
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module-inl.h"
namespace v8 {
@@ -93,7 +94,7 @@ class JSBinopReduction final {
if (BothInputsAre(Type::String()) ||
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
- const JSHeapBroker* broker = lowering_->js_heap_broker();
+ JSHeapBroker* broker = lowering_->js_heap_broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
@@ -408,7 +409,7 @@ class JSBinopReduction final {
// - relax effects from generic but not-side-effecting operations
JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Zone* zone)
+ JSHeapBroker* js_heap_broker, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -529,6 +530,33 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
}
}
+ // We might be able to constant-fold the String concatenation now.
+ if (r.BothInputsAre(Type::String())) {
+ HeapObjectBinopMatcher m(node);
+ if (m.IsFoldable()) {
+ StringRef left = m.left().Ref(js_heap_broker()).AsString();
+ StringRef right = m.right().Ref(js_heap_broker()).AsString();
+ if (left.length() + right.length() > String::kMaxLength) {
+ // No point in trying to optimize this, as it will just throw.
+ return NoChange();
+ }
+ // TODO(mslekova): get rid of these allows by doing either one of:
+ // 1. remove the optimization and check if it ruins the performance
+ // 2. leave a placeholder and do the actual allocations once back on the
+ // MT
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ ObjectRef cons(
+ js_heap_broker(),
+ factory()
+ ->NewConsString(left.object<String>(), right.object<String>())
+ .ToHandleChecked());
+ Node* value = jsgraph()->Constant(cons);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
// We might know for sure that we're creating a ConsString here.
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
@@ -962,7 +990,9 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasValue() && m.Ref(js_heap_broker()).IsString()) {
StringRef input_value = m.Ref(js_heap_broker()).AsString();
- return Replace(jsgraph()->Constant(input_value.ToNumber()));
+ double number;
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
+ return Replace(jsgraph()->Constant(number));
}
}
if (input_type.IsHeapConstant()) {
@@ -1035,6 +1065,20 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type.Is(Type::NaN())) {
return Replace(jsgraph()->HeapConstant(factory()->NaN_string()));
}
+ if (input_type.Is(Type::OrderedNumber()) &&
+ input_type.Min() == input_type.Max()) {
+ // TODO(mslekova): get rid of these allows by doing either one of:
+ // 1. remove the optimization and check if it ruins the performance
+ // 2. allocate all the ToString's from numbers before the compilation
+ // 3. leave a placeholder and do the actual allocations once back on the MT
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ // Note that we can use Type::OrderedNumber(), since
+ // both 0 and -0 map to the String "0" in JavaScript.
+ return Replace(jsgraph()->HeapConstant(
+ factory()->NumberToString(factory()->NewNumber(input_type.Min()))));
+ }
if (input_type.Is(Type::Number())) {
return Replace(graph()->NewNode(simplified()->NumberToString(), input));
}
@@ -1356,7 +1400,7 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
if (module_type.IsHeapConstant()) {
ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
- CellRef cell_constant(module_constant.GetCell(cell_index));
+ CellRef cell_constant = module_constant.GetCell(cell_index);
return jsgraph()->Constant(cell_constant);
}
@@ -1415,8 +1459,8 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
namespace {
-void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
- int builtin_index, int arity, CallDescriptor::Flags flags) {
+void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
+ CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
//
// ----------- A r g u m e n t s -----------
@@ -1678,8 +1722,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
} else if (shared.HasBuiltinId() &&
Builtins::HasCppImplementation(shared.builtin_id())) {
// Patch {node} to a direct CEntry call.
- ReduceBuiltin(isolate(), jsgraph(), node, shared.builtin_id(), arity,
- flags);
+ ReduceBuiltin(jsgraph(), node, shared.builtin_id(), arity, flags);
} else if (shared.HasBuiltinId() &&
Builtins::KindOf(shared.builtin_id()) == Builtins::TFJ) {
// Patch {node} to a direct code object call.
@@ -2278,6 +2321,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSToName:
return ReduceJSToName(node);
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
return ReduceJSToNumberOrNumeric(node);
case IrOpcode::kJSToString:
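The new constant-folding path in ReduceJSAdd above only folds a String + String addition when the combined length stays within the maximum string length; otherwise the addition must throw at runtime, so folding is skipped. A standalone sketch of that guard (the limit below is illustrative only; the real String::kMaxLength depends on the V8 version and build):

#include <cstddef>
#include <optional>
#include <string>

// Illustrative limit only, not the real String::kMaxLength.
constexpr std::size_t kIllustrativeMaxLength = (std::size_t{1} << 28) - 16;

// Mirrors the guard: fold "l" + "r" at compile time only when the result
// cannot exceed the maximum string length.
std::optional<std::string> TryFoldConcat(const std::string& l,
                                         const std::string& r) {
  if (l.size() + r.size() > kIllustrativeMaxLength) return std::nullopt;
  return l + r;
}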
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index c8fcac5ff6..c3bef9aeed 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Zone* zone);
+ JSHeapBroker* js_heap_broker, Zone* zone);
~JSTypedLowering() final {}
const char* reducer_name() const override { return "JSTypedLowering"; }
@@ -98,14 +98,14 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Type empty_string_type_;
Type pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 99f192acdf..99c52b1ade 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -184,8 +184,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
- case Runtime::kInlineIsJSWeakMap:
- case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c1d8570353..6d6cfafdbf 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -19,6 +19,7 @@ namespace {
bool IsRename(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
case IrOpcode::kFinishRegion:
case IrOpcode::kTypeGuard:
return true;
@@ -35,12 +36,14 @@ Node* ResolveRenames(Node* node) {
}
bool MayAlias(Node* a, Node* b) {
- if (a == b) return true;
- if (!NodeProperties::GetType(a).Maybe(NodeProperties::GetType(b))) {
- return false;
- }
- switch (b->opcode()) {
- case IrOpcode::kAllocate: {
+ if (a != b) {
+ if (!NodeProperties::GetType(a).Maybe(NodeProperties::GetType(b))) {
+ return false;
+ } else if (IsRename(b)) {
+ return MayAlias(a, b->InputAt(0));
+ } else if (IsRename(a)) {
+ return MayAlias(a->InputAt(0), b);
+ } else if (b->opcode() == IrOpcode::kAllocate) {
switch (a->opcode()) {
case IrOpcode::kAllocate:
case IrOpcode::kHeapConstant:
@@ -49,16 +52,7 @@ bool MayAlias(Node* a, Node* b) {
default:
break;
}
- break;
- }
- case IrOpcode::kFinishRegion:
- case IrOpcode::kTypeGuard:
- return MayAlias(a, b->InputAt(0));
- default:
- break;
- }
- switch (a->opcode()) {
- case IrOpcode::kAllocate: {
+ } else if (a->opcode() == IrOpcode::kAllocate) {
switch (b->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
@@ -66,13 +60,7 @@ bool MayAlias(Node* a, Node* b) {
default:
break;
}
- break;
}
- case IrOpcode::kFinishRegion:
- case IrOpcode::kTypeGuard:
- return MayAlias(a->InputAt(0), b);
- default:
- break;
}
return true;
}
@@ -445,6 +433,7 @@ LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Extend(
}
void LoadElimination::AbstractMaps::Print() const {
+ AllowHandleDereference allow_handle_dereference;
StdoutStream os;
for (auto pair : info_for_node_) {
os << " #" << pair.first->id() << ":" << pair.first->op()->mnemonic()
@@ -676,6 +665,12 @@ Node* LoadElimination::AbstractState::LookupField(Node* object,
}
bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
+ // If {object} is being initialized right here (indicated by {object} being
+ // an Allocate node instead of a FinishRegion node), we know that {other}
+ // can only alias with {object} if they refer to exactly the same node.
+ if (object_->opcode() == IrOpcode::kAllocate) {
+ return object_ == other;
+ }
// Decide aliasing based on the node kinds.
if (!compiler::MayAlias(object_, other)) {
return false;
@@ -905,8 +900,9 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
Reduction LoadElimination::ReduceLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
- Node* const object = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
if (access.offset == HeapObject::kMapOffset &&
@@ -924,12 +920,19 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
if (field_index >= 0) {
if (Node* replacement = state->LookupField(object, field_index)) {
// Make sure we don't resurrect dead {replacement} nodes.
- // Skip lowering if the type of the {replacement} node is not a subtype
- // of the original {node}'s type.
- // TODO(tebbi): We should insert a {TypeGuard} for the intersection of
- // these two types here once we properly handle {Type::None} everywhere.
- if (!replacement->IsDead() && NodeProperties::GetType(replacement)
- .Is(NodeProperties::GetType(node))) {
+ if (!replacement->IsDead()) {
+ // Introduce a TypeGuard if the type of the {replacement} node is not
+ // a subtype of the original {node}'s type.
+ if (!NodeProperties::GetType(replacement)
+ .Is(NodeProperties::GetType(node))) {
+ Type replacement_type = Type::Intersect(
+ NodeProperties::GetType(node),
+ NodeProperties::GetType(replacement), graph()->zone());
+ replacement = effect =
+ graph()->NewNode(common()->TypeGuard(replacement_type),
+ replacement, effect, control);
+ NodeProperties::SetType(replacement, replacement_type);
+ }
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
}
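The reworked MayAlias in load-elimination.cc now treats CheckHeapObject, FinishRegion and TypeGuard uniformly as renames and peels them off either operand before comparing allocations. A simplified standalone model of that recursion (not the real IR; the node below carries only the flags needed for the example):

// Minimal stand-in node: is_rename models CheckHeapObject/FinishRegion/
// TypeGuard, is_allocation models Allocate, input is the renamed value.
struct AliasNode {
  bool is_rename = false;
  bool is_allocation = false;
  const AliasNode* input = nullptr;
};

bool MayAlias(const AliasNode* a, const AliasNode* b) {
  if (a == b) return true;
  if (b->is_rename) return MayAlias(a, b->input);
  if (a->is_rename) return MayAlias(a->input, b);
  // Two distinct allocations are fresh objects and cannot alias.
  if (a->is_allocation && b->is_allocation) return false;
  return true;  // otherwise assume aliasing, conservatively
}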
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index d3b1b5c14a..2ce5a04397 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -9,6 +9,7 @@
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/maybe-handles.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index fe5b8c7889..5a0fc9dbfb 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -240,7 +240,8 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
// TODO(jarin) Support both sides.
Node* input = arith->InputAt(0);
if (input->opcode() == IrOpcode::kSpeculativeToNumber ||
- input->opcode() == IrOpcode::kJSToNumber) {
+ input->opcode() == IrOpcode::kJSToNumber ||
+ input->opcode() == IrOpcode::kJSToNumberConvertBigInt) {
input = input->InputAt(0);
}
if (input != phi) return nullptr;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 55ef35d231..f3a5fb9023 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -155,7 +155,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
representation_vector_[node->id()] = PromoteRepresentation(
- AtomicOpRepresentationOf(node->op()).representation());
+ AtomicOpType(node->op()).representation());
break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
@@ -782,7 +782,8 @@ class MachineRepresentationChecker {
str << std::endl;
}
str << " * input " << i << " (" << input->id() << ":" << *input->op()
- << ") doesn't have a " << expected_input_type << " representation.";
+ << ") has a " << input_type
+ << " representation (expected: " << expected_input_type << ").";
}
}
if (should_log_error) {
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index 0fcd97ff90..b81ad03d83 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -5,6 +5,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-properties.h"
+#include "src/external-reference.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index bd20d8c50d..83d27e03b9 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -5,13 +5,13 @@
#ifndef V8_COMPILER_MACHINE_GRAPH_H_
#define V8_COMPILER_MACHINE_GRAPH_H_
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/common-node-cache.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/globals.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 8eac3ed18c..241651254b 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -39,6 +39,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode() ||
+ IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
@@ -80,11 +81,12 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
+ IrOpcode::kWord64AtomicStore == op->opcode() ||
+ IrOpcode::kWord32AtomicPairStore == op->opcode());
return OpParameter<MachineRepresentation>(op);
}
-MachineType AtomicOpRepresentationOf(Operator const* op) {
+MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -137,6 +139,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -338,8 +342,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
- V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
- V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
V(Int64AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
@@ -390,18 +392,19 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(kTaggedPointer) \
V(kTagged)
+#define ATOMIC_U32_TYPE_LIST(V) \
+ V(Uint8) \
+ V(Uint16) \
+ V(Uint32)
+
#define ATOMIC_TYPE_LIST(V) \
+ ATOMIC_U32_TYPE_LIST(V) \
V(Int8) \
- V(Uint8) \
V(Int16) \
- V(Uint16) \
- V(Int32) \
- V(Uint32)
+ V(Int32)
-#define ATOMIC64_TYPE_LIST(V) \
- V(Uint8) \
- V(Uint16) \
- V(Uint32) \
+#define ATOMIC_U64_TYPE_LIST(V) \
+ ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
#define ATOMIC_REPRESENTATION_LIST(V) \
@@ -413,6 +416,14 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
+#define ATOMIC_PAIR_BINOP_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
+ V(Exchange)
+
#define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \
V(I32x4, 4) \
@@ -592,7 +603,7 @@ struct MachineOperatorGlobalCache {
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
- ATOMIC64_TYPE_LIST(ATOMIC_LOAD)
+ ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
@@ -647,7 +658,7 @@ struct MachineOperatorGlobalCache {
ATOMIC_OP(Word64AtomicOr, type) \
ATOMIC_OP(Word64AtomicXor, type) \
ATOMIC_OP(Word64AtomicExchange, type)
- ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
+ ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
@@ -676,7 +687,76 @@ struct MachineOperatorGlobalCache {
}; \
Word64AtomicCompareExchange##Type##Operator \
kWord64AtomicCompareExchange##Type;
- ATOMIC64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+ struct Word32AtomicPairLoadOperator : public Operator {
+ Word32AtomicPairLoadOperator()
+ : Operator(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
+
+ struct Word32AtomicPairStoreOperator : public Operator {
+ Word32AtomicPairStoreOperator()
+ : Operator(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ };
+ Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+
+#define ATOMIC_PAIR_OP(op) \
+ struct Word32AtomicPair##op##Operator : public Operator { \
+ Word32AtomicPair##op##Operator() \
+ : Operator(IrOpcode::kWord32AtomicPair##op, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word32AtomicPair##op", 4, 1, 1, 2, 1, 0) {} \
+ }; \
+ Word32AtomicPair##op##Operator kWord32AtomicPair##op;
+ ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
+#undef ATOMIC_PAIR_OP
+#undef ATOMIC_PAIR_BINOP_LIST
+
+#define ATOMIC64_NARROW_OP(op, type) \
+ struct op##type##Operator : public Operator1<MachineType> { \
+ op##type##Operator() \
+ : Operator1<MachineType>( \
+ IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, "#op", \
+ 3, 1, 1, 2, 1, 0, MachineType::type()) {} \
+ }; \
+ op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowAdd, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowExchange, type)
+ ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#undef ATOMIC64_NARROW_OP
+
+ struct Word32AtomicPairCompareExchangeOperator : public Operator {
+ Word32AtomicPairCompareExchangeOperator()
+ : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word64AtomicNarrowCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word64AtomicNarrowCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord64AtomicNarrowCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word64AtomicNarrowCompareExchange", 4, 1, 1, \
+ 2, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicNarrowCompareExchange##Type##Operator \
+ kWord64AtomicNarrowCompareExchange##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
// The {BitcastWordToTagged} operator must not be marked as pure (especially
@@ -965,10 +1045,10 @@ const Operator* MachineOperatorBuilder::Word32AtomicStore(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicExchange##kType; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -976,85 +1056,73 @@ const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
}
const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
- MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicCompareExchange##kRep; \
+ MachineType type) {
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicCompareExchange##kType; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAdd##kType; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicSub##kType; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAnd##kType; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicOr##kType; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicXor##kType; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kWord64AtomicLoad##Type; \
}
- ATOMIC64_TYPE_LIST(LOAD)
+ ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
@@ -1070,77 +1138,201 @@ const Operator* MachineOperatorBuilder::Word64AtomicStore(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAdd##kType; \
}
- ATOMIC64_TYPE_LIST(ADD)
+ ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicSub##kType; \
}
- ATOMIC64_TYPE_LIST(SUB)
+ ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAnd##kType; \
}
- ATOMIC64_TYPE_LIST(AND)
+ ATOMIC_U64_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicOr##kType; \
}
- ATOMIC64_TYPE_LIST(OR)
+ ATOMIC_U64_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicXor##kType; \
}
- ATOMIC64_TYPE_LIST(XOR)
+ ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicExchange##kType; \
}
- ATOMIC64_TYPE_LIST(EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
- MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicCompareExchange##kRep; \
+ MachineType type) {
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicCompareExchange##kType; \
}
- ATOMIC64_TYPE_LIST(COMPARE_EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
+ return &cache_.kWord32AtomicPairLoad;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
+ return &cache_.kWord32AtomicPairStore;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
+ return &cache_.kWord32AtomicPairAdd;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
+ return &cache_.kWord32AtomicPairSub;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
+ return &cache_.kWord32AtomicPairAnd;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
+ return &cache_.kWord32AtomicPairOr;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
+ return &cache_.kWord32AtomicPairXor;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
+ return &cache_.kWord32AtomicPairExchange;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
+ return &cache_.kWord32AtomicPairCompareExchange;
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
+ MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowAdd##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(ADD)
+#undef ADD
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowSub(
+ MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowSub##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(SUB)
+#undef SUB
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowAnd(
+ MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowAnd##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(AND)
+#undef AND
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowOr##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(OR)
+#undef OR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
+ MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowXor##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(XOR)
+#undef XOR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowExchange(
+ MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowExchange##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(EXCHANGE)
+#undef EXCHANGE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowCompareExchange(
+ MachineType type) {
+#define CMP_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowCompareExchange##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(CMP_EXCHANGE)
+#undef CMP_EXCHANGE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
+ return &cache_.kTaggedPoisonOnSpeculation;
+}
+
+const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
+ return &cache_.kWord32PoisonOnSpeculation;
+}
+
+const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
+ return &cache_.kWord64PoisonOnSpeculation;
+}
+
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
@@ -1203,7 +1395,8 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef MACHINE_TYPE_LIST
#undef MACHINE_REPRESENTATION_LIST
#undef ATOMIC_TYPE_LIST
-#undef ATOMIC64_TYPE_LIST
+#undef ATOMIC_U64_TYPE_LIST
+#undef ATOMIC_U32_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
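The Word32AtomicPair* operators added above exist because a 64-bit atomic access on a 32-bit target has to carry its value as two 32-bit halves, passed and returned as separate machine-level inputs and projections. A standalone sketch of splitting and rejoining such a pair (the low/high operand ordering used by a given instruction selector is a convention not shown here):

#include <cstdint>
#include <utility>

// Split a 64-bit value into 32-bit halves and rejoin them.
std::pair<uint32_t, uint32_t> SplitWord64(uint64_t v) {
  return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};  // {low, high}
}

uint64_t JoinWord64(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}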
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 65217cf2a6..261891dcdc 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -110,7 +110,7 @@ V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
V8_WARN_UNUSED_RESULT;
-MachineType AtomicOpRepresentationOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
@@ -140,8 +140,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64Popcnt = 1u << 15,
kWord32ReverseBits = 1u << 16,
kWord64ReverseBits = 1u << 17,
- kWord32ReverseBytes = 1u << 18,
- kWord64ReverseBytes = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
kSpeculationFence = 1u << 22,
@@ -150,9 +148,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
- kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow |
- kSpeculationFence
+ kWord32ReverseBits | kWord64ReverseBits | kInt32AbsWithOverflow |
+ kInt64AbsWithOverflow | kSpeculationFence
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -238,8 +235,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64Popcnt();
const OptionalOperator Word32ReverseBits();
const OptionalOperator Word64ReverseBits();
- const OptionalOperator Word32ReverseBytes();
- const OptionalOperator Word64ReverseBytes();
+ const Operator* Word32ReverseBytes();
+ const Operator* Word64ReverseBytes();
const OptionalOperator Int32AbsWithOverflow();
const OptionalOperator Int64AbsWithOverflow();
@@ -624,33 +621,66 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-store [base + index], value
const Operator* Word64AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
- const Operator* Word32AtomicExchange(MachineType rep);
+ const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
- const Operator* Word64AtomicExchange(MachineType rep);
+ const Operator* Word64AtomicExchange(MachineType type);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* Word32AtomicCompareExchange(MachineType rep);
+ const Operator* Word32AtomicCompareExchange(MachineType type);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* Word64AtomicCompareExchange(MachineType rep);
+ const Operator* Word64AtomicCompareExchange(MachineType type);
// atomic-add [base + index], value
- const Operator* Word32AtomicAdd(MachineType rep);
+ const Operator* Word32AtomicAdd(MachineType type);
// atomic-sub [base + index], value
- const Operator* Word32AtomicSub(MachineType rep);
+ const Operator* Word32AtomicSub(MachineType type);
// atomic-and [base + index], value
- const Operator* Word32AtomicAnd(MachineType rep);
+ const Operator* Word32AtomicAnd(MachineType type);
// atomic-or [base + index], value
- const Operator* Word32AtomicOr(MachineType rep);
+ const Operator* Word32AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word32AtomicXor(MachineType rep);
- // atomic-load [base + index]
+ // atomic-add [base + index], value
const Operator* Word64AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
- const Operator* Word64AtomicSub(MachineType rep);
+ const Operator* Word64AtomicSub(MachineType type);
// atomic-and [base + index], value
- const Operator* Word64AtomicAnd(MachineType rep);
+ const Operator* Word64AtomicAnd(MachineType type);
// atomic-or [base + index], value
- const Operator* Word64AtomicOr(MachineType rep);
+ const Operator* Word64AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType rep);
+ // atomic-narrow-add [base + index], value
+ const Operator* Word64AtomicNarrowAdd(MachineType type);
+  // atomic-narrow-sub [base + index], value
+ const Operator* Word64AtomicNarrowSub(MachineType type);
+ // atomic-narrow-and [base + index], value
+ const Operator* Word64AtomicNarrowAnd(MachineType type);
+ // atomic-narrow-or [base + index], value
+ const Operator* Word64AtomicNarrowOr(MachineType type);
+ // atomic-narrow-xor [base + index], value
+ const Operator* Word64AtomicNarrowXor(MachineType type);
+ // atomic-narrow-exchange [base + index], value
+ const Operator* Word64AtomicNarrowExchange(MachineType type);
+ // atomic-narrow-compare-exchange [base + index], old_value, new_value
+ const Operator* Word64AtomicNarrowCompareExchange(MachineType type);
+ // atomic-pair-load [base + index]
+ const Operator* Word32AtomicPairLoad();
+  // atomic-pair-store [base + index], value_high, value_low
+ const Operator* Word32AtomicPairStore();
+ // atomic-pair-add [base + index], value_high, value_low
+ const Operator* Word32AtomicPairAdd();
+  // atomic-pair-sub [base + index], value_high, value_low
+ const Operator* Word32AtomicPairSub();
+ // atomic-pair-and [base + index], value_high, value_low
+ const Operator* Word32AtomicPairAnd();
+ // atomic-pair-or [base + index], value_high, value_low
+ const Operator* Word32AtomicPairOr();
+ // atomic-pair-xor [base + index], value_high, value_low
+ const Operator* Word32AtomicPairXor();
+ // atomic-pair-exchange [base + index], value_high, value_low
+ const Operator* Word32AtomicPairExchange();
+ // atomic-pair-compare-exchange [base + index], old_value_high, old_value_low,
+ // new_value_high, new_value_low
+ const Operator* Word32AtomicPairCompareExchange();
const OptionalOperator SpeculationFence();
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index e7ec150985..3ba3dcc6b8 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -101,8 +101,10 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kUnalignedLoad:
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kUnalignedStore:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
case IrOpcode::kDebugBreak:
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index cfe132338c..66f38dc283 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -534,7 +534,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (m.right().HasValue() && mleft.right().HasValue()) {
MipsOperandGenerator g(this);
@@ -1778,7 +1779,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1817,7 +1818,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1858,7 +1859,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2169,8 +2170,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
uint8_t offset;
MipsOperandGenerator g(this);
if (TryMatchConcat(shuffle, &offset)) {
- Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(offset));
+ Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
@@ -2215,9 +2216,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes;
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
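
In the S8x16Shuffle hunk above, only the order in which the two inputs reach the MIPS backend is swapped; the IR-level idea of S8x16Concat stays the same: take 16 consecutive bytes, starting at a byte offset, out of the two 16-byte inputs laid end to end. A rough scalar sketch of that idea (illustrative, not the emitted MSA code):

    #include <cstdint>
    #include <cstring>

    // Concatenate two 16-byte vectors and extract 16 bytes at `offset`.
    void S8x16ConcatSketch(const uint8_t a[16], const uint8_t b[16], int offset,
                           uint8_t out[16]) {
      uint8_t concat[32];
      std::memcpy(concat, a, 16);
      std::memcpy(concat + 16, b, 16);
      for (int i = 0; i < 16; ++i) out[i] = concat[offset + i];
    }
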
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com \ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com \ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index ee02d30244..9f9aebc145 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1927,10 +1927,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad()) {
- LoadRepresentation load_rep = LoadRepresentationOf(n->op());
- return load_rep.IsUnsigned();
- } else if (m.IsUnalignedLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
+ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else {
@@ -2443,7 +2441,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2482,7 +2480,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2523,7 +2521,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2839,8 +2837,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
uint8_t offset;
Mips64OperandGenerator g(this);
if (TryMatchConcat(shuffle, &offset)) {
- Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(offset));
+ Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
@@ -2899,9 +2897,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes;
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 4b02cad9b9..cbefb0ac35 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -195,7 +195,7 @@ struct HeapObjectMatcher final
return this->HasValue() && this->Value().address() == value.address();
}
- ObjectRef Ref(const JSHeapBroker* broker) const {
+ ObjectRef Ref(JSHeapBroker* broker) const {
return ObjectRef(broker, this->Value());
}
};
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 0d0e4f3c97..22cdd0b091 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,6 +12,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -602,6 +603,7 @@ bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToString:
case IrOpcode::kToBoolean:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 7a6b19cb35..d6ea247fbc 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -121,6 +121,7 @@
V(JSToLength) \
V(JSToName) \
V(JSToNumber) \
+ V(JSToNumberConvertBigInt) \
V(JSToNumeric) \
V(JSToObject) \
V(JSToString) \
@@ -152,6 +153,7 @@
V(JSCreateEmptyLiteralArray) \
V(JSCreateLiteralObject) \
V(JSCreateEmptyLiteralObject) \
+ V(JSCloneObject) \
V(JSCreateLiteralRegExp)
#define JS_OBJECT_OP_LIST(V) \
@@ -560,106 +562,125 @@
V(Float64Mod) \
V(Float64Pow)
-#define MACHINE_OP_LIST(V) \
- MACHINE_UNOP_32_LIST(V) \
- MACHINE_BINOP_32_LIST(V) \
- MACHINE_BINOP_64_LIST(V) \
- MACHINE_COMPARE_BINOP_LIST(V) \
- MACHINE_FLOAT32_BINOP_LIST(V) \
- MACHINE_FLOAT32_UNOP_LIST(V) \
- MACHINE_FLOAT64_BINOP_LIST(V) \
- MACHINE_FLOAT64_UNOP_LIST(V) \
- V(DebugAbort) \
- V(DebugBreak) \
- V(Comment) \
- V(Load) \
- V(PoisonedLoad) \
- V(Store) \
- V(StackSlot) \
- V(Word32Popcnt) \
- V(Word64Popcnt) \
- V(Word64Clz) \
- V(Word64Ctz) \
- V(Word64ReverseBits) \
- V(Word64ReverseBytes) \
- V(Int64AbsWithOverflow) \
- V(BitcastTaggedToWord) \
- V(BitcastWordToTagged) \
- V(BitcastWordToTaggedSigned) \
- V(TruncateFloat64ToWord32) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeFloat64ToUint64) \
- V(Float64SilenceNaN) \
- V(TruncateFloat64ToUint32) \
- V(TruncateFloat32ToInt32) \
- V(TruncateFloat32ToUint32) \
- V(TryTruncateFloat32ToInt64) \
- V(TryTruncateFloat64ToInt64) \
- V(TryTruncateFloat32ToUint64) \
- V(TryTruncateFloat64ToUint64) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateInt64ToInt32) \
- V(RoundFloat64ToInt32) \
- V(RoundInt32ToFloat32) \
- V(RoundInt64ToFloat32) \
- V(RoundInt64ToFloat64) \
- V(RoundUint32ToFloat32) \
- V(RoundUint64ToFloat32) \
- V(RoundUint64ToFloat64) \
- V(BitcastFloat32ToInt32) \
- V(BitcastFloat64ToInt64) \
- V(BitcastInt32ToFloat32) \
- V(BitcastInt64ToFloat64) \
- V(Float64ExtractLowWord32) \
- V(Float64ExtractHighWord32) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
- V(LoadStackPointer) \
- V(LoadFramePointer) \
- V(LoadParentFramePointer) \
- V(UnalignedLoad) \
- V(UnalignedStore) \
- V(Int32PairAdd) \
- V(Int32PairSub) \
- V(Int32PairMul) \
- V(Word32PairShl) \
- V(Word32PairShr) \
- V(Word32PairSar) \
- V(ProtectedLoad) \
- V(ProtectedStore) \
- V(Word32AtomicLoad) \
- V(Word32AtomicStore) \
- V(Word32AtomicExchange) \
- V(Word32AtomicCompareExchange) \
- V(Word32AtomicAdd) \
- V(Word32AtomicSub) \
- V(Word32AtomicAnd) \
- V(Word32AtomicOr) \
- V(Word32AtomicXor) \
- V(Word64AtomicLoad) \
- V(Word64AtomicStore) \
- V(Word64AtomicAdd) \
- V(Word64AtomicSub) \
- V(Word64AtomicAnd) \
- V(Word64AtomicOr) \
- V(Word64AtomicXor) \
- V(Word64AtomicExchange) \
- V(Word64AtomicCompareExchange) \
- V(SpeculationFence) \
- V(SignExtendWord8ToInt32) \
- V(SignExtendWord16ToInt32) \
- V(SignExtendWord8ToInt64) \
- V(SignExtendWord16ToInt64) \
- V(SignExtendWord32ToInt64) \
+#define MACHINE_WORD64_ATOMIC_OP_LIST(V) \
+ V(Word64AtomicLoad) \
+ V(Word64AtomicStore) \
+ V(Word64AtomicAdd) \
+ V(Word64AtomicSub) \
+ V(Word64AtomicAnd) \
+ V(Word64AtomicOr) \
+ V(Word64AtomicXor) \
+ V(Word64AtomicExchange) \
+ V(Word64AtomicCompareExchange) \
+ V(Word64AtomicNarrowAdd) \
+ V(Word64AtomicNarrowSub) \
+ V(Word64AtomicNarrowAnd) \
+ V(Word64AtomicNarrowOr) \
+ V(Word64AtomicNarrowXor) \
+ V(Word64AtomicNarrowExchange) \
+ V(Word64AtomicNarrowCompareExchange)
+
+#define MACHINE_OP_LIST(V) \
+ MACHINE_UNOP_32_LIST(V) \
+ MACHINE_BINOP_32_LIST(V) \
+ MACHINE_BINOP_64_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ MACHINE_FLOAT32_BINOP_LIST(V) \
+ MACHINE_FLOAT32_UNOP_LIST(V) \
+ MACHINE_FLOAT64_BINOP_LIST(V) \
+ MACHINE_FLOAT64_UNOP_LIST(V) \
+ MACHINE_WORD64_ATOMIC_OP_LIST(V) \
+ V(DebugAbort) \
+ V(DebugBreak) \
+ V(Comment) \
+ V(Load) \
+ V(PoisonedLoad) \
+ V(Store) \
+ V(StackSlot) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
+ V(Word64ReverseBits) \
+ V(Word64ReverseBytes) \
+ V(Int64AbsWithOverflow) \
+ V(BitcastTaggedToWord) \
+ V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
+ V(TruncateFloat64ToWord32) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
+ V(Float64SilenceNaN) \
+ V(TruncateFloat64ToUint32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint32ToFloat32) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(TaggedPoisonOnSpeculation) \
+ V(Word32PoisonOnSpeculation) \
+ V(Word64PoisonOnSpeculation) \
+ V(LoadStackPointer) \
+ V(LoadFramePointer) \
+ V(LoadParentFramePointer) \
+ V(UnalignedLoad) \
+ V(UnalignedStore) \
+ V(Int32PairAdd) \
+ V(Int32PairSub) \
+ V(Int32PairMul) \
+ V(Word32PairShl) \
+ V(Word32PairShr) \
+ V(Word32PairSar) \
+ V(ProtectedLoad) \
+ V(ProtectedStore) \
+ V(Word32AtomicLoad) \
+ V(Word32AtomicStore) \
+ V(Word32AtomicExchange) \
+ V(Word32AtomicCompareExchange) \
+ V(Word32AtomicAdd) \
+ V(Word32AtomicSub) \
+ V(Word32AtomicAnd) \
+ V(Word32AtomicOr) \
+ V(Word32AtomicXor) \
+ V(Word32AtomicPairLoad) \
+ V(Word32AtomicPairStore) \
+ V(Word32AtomicPairAdd) \
+ V(Word32AtomicPairSub) \
+ V(Word32AtomicPairAnd) \
+ V(Word32AtomicPairOr) \
+ V(Word32AtomicPairXor) \
+ V(Word32AtomicPairExchange) \
+ V(Word32AtomicPairCompareExchange) \
+ V(SpeculationFence) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
+ V(SignExtendWord8ToInt64) \
+ V(SignExtendWord16ToInt64) \
+ V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
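
Both the new MACHINE_WORD64_ATOMIC_OP_LIST and the extended MACHINE_OP_LIST are X-macros: a consumer defines V to expand each name however it needs (enum values, switch cases, builder declarations), so adding the Word64AtomicNarrow* and Word32AtomicPair* opcodes in one list propagates to every use site. A self-contained sketch of the idiom, with hypothetical names:

    // X-macro sketch: one list, several expansions.
    #define MY_OP_LIST(V) \
      V(Add)              \
      V(Sub)

    enum class MyOpcode {
    #define DECLARE_OPCODE(Name) k##Name,
      MY_OP_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    const char* MyOpcodeName(MyOpcode op) {
      switch (op) {
    #define OPCODE_CASE(Name) \
      case MyOpcode::k##Name: \
        return #Name;
        MY_OP_LIST(OPCODE_CASE)
    #undef OPCODE_CASE
      }
      return "unknown";
    }
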
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index a9ae8c322a..67a7b138a5 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -16,8 +16,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-OperationTyper::OperationTyper(Isolate* isolate,
- const JSHeapBroker* js_heap_broker, Zone* zone)
+OperationTyper::OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker,
+ Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
Factory* factory = isolate->factory();
infinity_ =
@@ -265,7 +265,9 @@ Type OperationTyper::ConvertReceiver(Type type) {
return type;
}
-Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
+// Returns the result type of converting {type} to number, if the
+// result does not depend on conversion options.
+base::Optional<Type> OperationTyper::ToNumberCommon(Type type) {
if (type.Is(Type::Number())) return type;
if (type.Is(Type::NullOrUndefined())) {
if (type.Is(Type::Null())) return cache_.kSingletonZero;
@@ -289,6 +291,13 @@ Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
}
return Type::Intersect(type, Type::Number(), zone());
}
+ return base::Optional<Type>();
+}
+
+Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
+ if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
+ return *maybe_result_type;
+ }
if (type.Is(Type::BigInt())) {
return mode == Object::Conversion::kToNumber ? Type::None() : type;
}
@@ -300,6 +309,13 @@ Type OperationTyper::ToNumber(Type type) {
return ToNumberOrNumeric(Object::Conversion::kToNumber, type);
}
+Type OperationTyper::ToNumberConvertBigInt(Type type) {
+ if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
+ return *maybe_result_type;
+ }
+ return Type::Number();
+}
+
Type OperationTyper::ToNumeric(Type type) {
return ToNumberOrNumeric(Object::Conversion::kToNumeric, type);
}
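
ToNumberCommon factors out the cases whose result does not depend on how BigInts are converted; it returns an empty base::Optional when the caller has to decide. ToNumberOrNumeric and the new ToNumberConvertBigInt then share the common part and differ only in the fallback. A toy version of the same factoring, with std::optional and plain ints standing in for base::Optional and Type:

    #include <optional>

    // Illustrative only.
    std::optional<int> ToNumberCommonSketch(int input) {
      if (input >= 0) return input;  // result is the same for every conversion mode
      return std::nullopt;           // mode-dependent; let the caller decide
    }

    int ToNumberSketch(int input) {
      if (std::optional<int> common = ToNumberCommonSketch(input)) return *common;
      return -input;  // mode-specific fallback
    }
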
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 81f20bcda4..fb5997485c 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -27,14 +27,14 @@ class TypeCache;
class V8_EXPORT_PRIVATE OperationTyper {
public:
- OperationTyper(Isolate* isolate, const JSHeapBroker* js_heap_broker,
- Zone* zone);
+ OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker, Zone* zone);
// Typing Phi.
Type Merge(Type left, Type right);
Type ToPrimitive(Type type);
Type ToNumber(Type type);
+ Type ToNumberConvertBigInt(Type type);
Type ToNumeric(Type type);
Type ToBoolean(Type type);
@@ -78,6 +78,7 @@ class V8_EXPORT_PRIVATE OperationTyper {
typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
Type ToNumberOrNumeric(Object::Conversion mode, Type type);
+ base::Optional<Type> ToNumberCommon(Type type);
ComparisonOutcome Invert(ComparisonOutcome);
Type Invert(Type);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 689561059c..a5d16053d2 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -76,6 +76,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
case IrOpcode::kJSCreateObject:
+ case IrOpcode::kJSCloneObject:
// Property access operations
case IrOpcode::kJSLoadNamed:
@@ -93,6 +94,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 3366d1db94..5717c70348 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -40,6 +40,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
@@ -83,6 +84,7 @@
#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -135,7 +137,7 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
- js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_);
+ js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_, codegen_zone_);
dependencies_ =
new (codegen_zone_) CompilationDependencies(isolate_, codegen_zone_);
}
@@ -146,7 +148,6 @@ class PipelineData {
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
int wasm_function_index,
const AssemblerOptions& assembler_options)
: isolate_(nullptr),
@@ -171,7 +172,6 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- wasm_compilation_data_(wasm_compilation_data),
assembler_options_(assembler_options) {}
// For machine graph testing entry point.
@@ -301,6 +301,10 @@ class PipelineData {
return jump_optimization_info_;
}
+ const AssemblerOptions& assembler_options() const {
+ return assembler_options_;
+ }
+
CodeTracer* GetCodeTracer() const {
return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
: wasm_engine_->GetCodeTracer();
@@ -393,8 +397,8 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- wasm_compilation_data_, info()->GetPoisoningMitigationLevel(),
- assembler_options_, info_->builtin_index());
+ info()->GetPoisoningMitigationLevel(), assembler_options_,
+ info_->builtin_index());
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -411,10 +415,6 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
- WasmCompilationData* wasm_compilation_data() const {
- return wasm_compilation_data_;
- }
-
int wasm_function_index() const { return wasm_function_index_; }
private:
@@ -478,8 +478,6 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
- WasmCompilationData* wasm_compilation_data_ = nullptr;
-
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
AssemblerOptions assembler_options_;
@@ -527,7 +525,9 @@ class PipelineImpl final {
OptimizedCompilationInfo* info() const;
Isolate* isolate() const;
+ CodeGenerator* code_generator() const;
+ private:
PipelineData* const data_;
};
@@ -997,7 +997,6 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index, bool asmjs_origin)
: OptimizedCompilationJob(kNoStackLimit, info, "TurboFan",
@@ -1007,7 +1006,7 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
wasm_engine, function_body, wasm_module, info, &zone_stats_)),
data_(&zone_stats_, wasm_engine, info, mcgraph,
pipeline_statistics_.get(), source_positions, node_origins,
- wasm_compilation_data, function_index, WasmAssemblerOptions()),
+ function_index, WasmAssemblerOptions()),
pipeline_(&data_),
linkage_(call_descriptor),
native_module_(native_module),
@@ -1074,7 +1073,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
- CodeGenerator* code_generator = pipeline_.data_->code_generator();
+ CodeGenerator* code_generator = pipeline_.code_generator();
CodeDesc code_desc;
code_generator->tasm()->GetCode(nullptr, &code_desc);
@@ -1083,7 +1082,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
code_generator->frame()->GetTotalFrameSlotCount(),
code_generator->GetSafepointTableOffset(),
code_generator->GetHandlerTableOffset(),
- data_.wasm_compilation_data()->GetProtectedInstructions(),
+ code_generator->GetProtectedInstructions(),
code_generator->GetSourcePositionTable(), wasm::WasmCode::kTurbofan);
if (data_.info()->trace_turbo_json_enabled()) {
@@ -1279,6 +1278,21 @@ struct UntyperPhase {
}
};
+struct CopyMetadataForConcurrentCompilePhase {
+ static const char* phase_name() {
+ return "copy metadata for concurrent compile";
+ }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ data->jsgraph()->Dead());
+ JSHeapCopyReducer heap_copy_reducer(data->js_heap_broker());
+ AddReducer(data, &graph_reducer, &heap_copy_reducer);
+ graph_reducer.ReduceGraph();
+ data->js_heap_broker()->StopSerializing();
+ }
+};
+
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -2015,15 +2029,20 @@ bool PipelineImpl::CreateGraph() {
Run<TyperPhase>(&typer);
RunPrintAndVerify(TyperPhase::phase_name());
+ // Do some hacky things to prepare for the optimization phase.
+ // (caching handles, etc.).
+ Run<ConcurrentOptimizationPrepPhase>();
+
+ if (FLAG_concurrent_compiler_frontend) {
+ data->js_heap_broker()->SerializeStandardObjects();
+ Run<CopyMetadataForConcurrentCompilePhase>();
+ }
+
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify(TypedLoweringPhase::phase_name());
}
- // Do some hacky things to prepare for the optimization phase.
- // (caching handles, etc.).
- Run<ConcurrentOptimizationPrepPhase>();
-
data->EndPhaseKind();
return true;
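
CopyMetadataForConcurrentCompilePhase walks the graph with JSHeapCopyReducer so that the JSHeapBroker has copied the heap data later phases will read, then stops further serialization; CreateGraph runs it (after SerializeStandardObjects) only when --concurrent-compiler-frontend is set. Pipeline phases follow a small shared convention: a struct with a static phase_name() and a Run(PipelineData*, Zone*) method, driven by Run<Phase>(). A stripped-down sketch of that convention with stub types, purely illustrative:

    #include <cstdio>

    struct Zone {};
    struct PipelineData {};

    struct ExamplePhase {
      static const char* phase_name() { return "example phase"; }
      void Run(PipelineData* data, Zone* temp_zone) {
        // A real phase would transform or analyze data->graph() here,
        // allocating temporaries in temp_zone.
        std::printf("running %s\n", phase_name());
      }
    };

    template <typename Phase>
    void RunPhase(PipelineData* data, Zone* temp_zone) {
      Phase phase;
      phase.Run(data, temp_zone);
    }
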
@@ -2254,14 +2273,13 @@ OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin asmjs_origin) {
return new PipelineWasmCompilationJob(
info, wasm_engine, mcgraph, call_descriptor, source_positions,
- node_origins, wasm_compilation_data, function_body, wasm_module,
- native_module, function_index, asmjs_origin);
+ node_origins, function_body, wasm_module, native_module, function_index,
+ asmjs_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -2271,8 +2289,8 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
+ data.InitializeFrameData(nullptr);
PipelineImpl pipeline(&data);
- pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
@@ -2378,6 +2396,18 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
PoisoningMitigationLevel::kDontPoison) {
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+ } else if (data_->assembler_options().isolate_independent_code) {
+ // TODO(v8:6666): Extend support to user code. Ensure that
+ // it is mutually exclusive with the Poisoning configuration above; and that
+ // it cooperates with restricted allocatable registers above.
+ static_assert(kRootRegister == kSpeculationPoisonRegister,
+ "The following checks assume root equals poison register");
+ CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
+ CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
+ AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
+ call_descriptor, run_verifier);
+#endif // V8_TARGET_ARCH_IA32
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2642,6 +2672,10 @@ OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
+CodeGenerator* PipelineImpl::code_generator() const {
+ return data_->code_generator();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 95d13f3169..5e4ae8671b 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -37,7 +37,6 @@ class MachineGraph;
class NodeOriginTable;
class Schedule;
class SourcePositionTable;
-class WasmCompilationData;
class Pipeline : public AllStatic {
public:
@@ -51,7 +50,6 @@ class Pipeline : public AllStatic {
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin wasm_origin);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 9c71d65d9c..45cd95a9e0 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -13,6 +13,7 @@
#include "src/double.h"
#include "src/optimized-compilation-info.h"
#include "src/ppc/macro-assembler-ppc.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -881,11 +882,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = kScratchReg;
- Label current_pc;
- __ mov_label_addr(scratch, &current_pc);
-
- __ bind(&current_pc);
- __ subi(scratch, scratch, Operand(__ pc_offset()));
+ __ ComputeCodeStartAddress(scratch);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
@@ -1834,7 +1831,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}
@@ -1861,7 +1858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}
@@ -2057,6 +2054,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+ case kPPC_ByteRev32: {
+ Register input = i.InputRegister(0);
+ Register output = i.OutputRegister();
+ Register temp1 = r0;
+ __ rotlwi(temp1, input, 8);
+ __ rlwimi(temp1, input, 24, 0, 7);
+ __ rlwimi(temp1, input, 24, 16, 23);
+ __ extsw(output, temp1);
+ break;
+ }
+#ifdef V8_TARGET_ARCH_PPC64
+ case kPPC_ByteRev64: {
+ Register input = i.InputRegister(0);
+ Register output = i.OutputRegister();
+ Register temp1 = r0;
+ Register temp2 = kScratchReg;
+ Register temp3 = i.TempRegister(0);
+ __ rldicl(temp1, input, 32, 32);
+ __ rotlwi(temp2, input, 8);
+ __ rlwimi(temp2, input, 24, 0, 7);
+ __ rotlwi(temp3, temp1, 8);
+ __ rlwimi(temp2, input, 24, 16, 23);
+ __ rlwimi(temp3, temp1, 24, 0, 7);
+ __ rlwimi(temp3, temp1, 24, 16, 23);
+ __ rldicr(temp2, temp2, 32, 31);
+ __ orx(output, temp2, temp3);
+ break;
+ }
+#endif // V8_TARGET_ARCH_PPC64
default:
UNREACHABLE();
break;
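
The kPPC_ByteRev64 case builds a 64-bit byte reversal from 32-bit rotate-and-insert instructions: each 32-bit half is byte-reversed and the two halves trade places. The equivalent arithmetic in plain C++ (illustrative, not the emitted sequence):

    #include <cstdint>

    uint32_t ByteRev32(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
             ((x << 8) & 0x00FF0000u) | (x << 24);
    }

    uint64_t ByteRev64(uint64_t x) {
      uint32_t low = static_cast<uint32_t>(x);
      uint32_t high = static_cast<uint32_t>(x >> 32);
      // The reversed low half becomes the new high half and vice versa.
      return (static_cast<uint64_t>(ByteRev32(low)) << 32) | ByteRev32(high);
    }
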
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index f68ab3ae68..3f3270028c 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -124,7 +124,9 @@ namespace compiler {
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
- V(PPC_StoreDouble)
+ V(PPC_StoreDouble) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index 2b491f1b80..51c92e8e84 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -109,6 +109,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastFloat32ToInt32:
case kPPC_BitcastInt64ToDouble:
case kPPC_BitcastDoubleToInt64:
+ case kPPC_ByteRev32:
+ case kPPC_ByteRev64:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index b02e80d389..6cb98c4f95 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -937,9 +937,18 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ PPCOperandGenerator g(this);
+ InstructionOperand temp[] = {g.TempRegister()};
+ Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), 1, temp);
+}
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
@@ -1977,7 +1986,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2012,7 +2021,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2053,7 +2062,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index d319304df6..42b28cfa76 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -192,8 +192,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
// here, once we have the immutable bit in the access_info.
// TODO(turbofan): Given that we already have the field_index here, we
- // might be smarter in the future and not rely on the LookupIterator,
- // but for now let's just do what Crankshaft does.
+ // might be smarter in the future and not rely on the LookupIterator.
LookupIterator it(isolate(), m.Value(), name,
LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::DATA) {
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 7b569a9a12..6a073be65d 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -26,7 +26,7 @@ class SimplifiedOperatorBuilder;
class PropertyAccessBuilder {
public:
- PropertyAccessBuilder(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies)
: jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -54,7 +54,7 @@ class PropertyAccessBuilder {
private:
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Graph* graph() const;
Isolate* isolate() const;
@@ -69,7 +69,7 @@ class PropertyAccessBuilder {
Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
JSGraph* jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
CompilationDependencies* dependencies_;
};
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index ff8bebc411..304d0e4ff1 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -229,7 +229,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* WordNot(Node* a) {
if (machine()->Is32()) {
- return Word32Not(a);
+ return Word32BitwiseNot(a);
} else {
return Word64Not(a);
}
@@ -263,7 +263,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Word32NotEqual(Node* a, Node* b) {
return Word32BinaryNot(Word32Equal(a, b));
}
- Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+ Node* Word32BitwiseNot(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
Node* Word64And(Node* a, Node* b) {
@@ -711,10 +711,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float64RoundTiesEven().op(), a);
}
Node* Word32ReverseBytes(Node* a) {
- return AddNode(machine()->Word32ReverseBytes().op(), a);
+ return AddNode(machine()->Word32ReverseBytes(), a);
}
Node* Word64ReverseBytes(Node* a) {
- return AddNode(machine()->Word64ReverseBytes().op(), a);
+ return AddNode(machine()->Word64ReverseBytes(), a);
}
// Float64 bit operations.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 01c80e6954..ab9bd16e81 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -12,6 +12,7 @@
#include "src/compiler/osr.h"
#include "src/optimized-compilation-info.h"
#include "src/s390/macro-assembler-s390.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 340cbb65c1..8174551777 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -329,12 +329,12 @@ ArchOpcode SelectLoadOpcode(Node* node) {
/* Float unary op*/ \
V(BitcastFloat32ToInt32) \
/* V(TruncateFloat64ToWord32) */ \
- /* V(RoundFloat64ToInt32) */ \
- /* V(TruncateFloat32ToInt32) */ \
- /* V(TruncateFloat32ToUint32) */ \
- /* V(TruncateFloat64ToUint32) */ \
- /* V(ChangeFloat64ToInt32) */ \
- /* V(ChangeFloat64ToUint32) */ \
+ V(RoundFloat64ToInt32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TruncateFloat64ToUint32) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
/* Word32 unary op */ \
V(Word32Clz) \
V(Word32Popcnt) \
@@ -2256,7 +2256,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2291,7 +2291,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2339,7 +2339,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
@@ -2609,8 +2609,6 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes |
MachineOperatorBuilder::kInt32AbsWithOverflow |
MachineOperatorBuilder::kInt64AbsWithOverflow |
MachineOperatorBuilder::kWord64Popcnt;
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 645f47f706..882e3b9d6e 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -484,16 +484,9 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[i] =
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
- rep_node[i + num_lanes / 2] =
- graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
-#else
rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
rep_node[i + num_lanes / 2] =
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
-#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
@@ -554,21 +547,12 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[i] = FixUpperBits(
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
- shift_val);
- rep_node[i + num_lanes / 2] = FixUpperBits(
- graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
- shift_val);
-#else
rep_node[i] = FixUpperBits(
graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
shift_val);
rep_node[i + num_lanes / 2] = FixUpperBits(
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
shift_val);
-#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
@@ -804,17 +788,10 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Node* input = nullptr;
-#if defined(V8_TARGET_BIG_ENDIAN)
- if (i < num_lanes / 2)
- input = rep_right[i];
- else
- input = rep_left[i - num_lanes / 2];
-#else
if (i < num_lanes / 2)
input = rep_left[i];
else
input = rep_right[i - num_lanes / 2];
-#endif
if (is_signed) {
Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
input = d_min.Phi(phi_rep, min, input);
@@ -1366,12 +1343,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_node = zone()->NewArray<Node*>(16);
for (int i = 0; i < 16; i++) {
int lane = shuffle[i];
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[15 - i] =
- lane < 16 ? rep_left[15 - lane] : rep_right[31 - lane];
-#else
rep_node[i] = lane < 16 ? rep_left[lane] : rep_right[lane - 16];
-#endif
}
ReplaceNode(node, rep_node, 16);
break;
@@ -1487,6 +1459,59 @@ void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
}
}
+template <typename T>
+void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int32_t) / sizeof(T);
+ const int bit_size = sizeof(T) * 8;
+ const Operator* sign_extend;
+ switch (sizeof(T)) {
+ case 1:
+ sign_extend = machine()->SignExtendWord8ToInt32();
+ break;
+ case 2:
+ sign_extend = machine()->SignExtendWord16ToInt32();
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ for (int i = 0; i < kNumLanes32; i++) {
+ if (replacements[i] != nullptr) {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = graph()->NewNode(
+ sign_extend,
+ graph()->NewNode(machine()->Word32Sar(), replacements[i],
+ mcgraph_->Int32Constant(j * bit_size)));
+ }
+ } else {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = nullptr;
+ }
+ }
+ }
+}
+
+template <typename T>
+void SimdScalarLowering::SmallerIntToInt32(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int32_t) / sizeof(T);
+ const int bit_size = sizeof(T) * 8;
+ const int bit_mask = (1 << bit_size) - 1;
+
+ for (int i = 0; i < kNumLanes32; ++i) {
+ result[i] = mcgraph_->Int32Constant(0);
+ for (int j = 0; j < num_ints; j++) {
+ if (replacements[num_ints * i + j] != nullptr) {
+ Node* clean_bits = graph()->NewNode(machine()->Word32And(),
+ replacements[num_ints * i + j],
+ mcgraph_->Int32Constant(bit_mask));
+ Node* shift = graph()->NewNode(machine()->Word32Shl(), clean_bits,
+ mcgraph_->Int32Constant(j * bit_size));
+ result[i] = graph()->NewNode(machine()->Word32Or(), result[i], shift);
+ }
+ }
+ }
+}
+
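
Int32ToSmallerInt and SmallerIntToInt32 convert between the Int32x4 replacement representation and the narrower Int16x8/Int8x16 ones by slicing each 32-bit word into sign-extended lanes, and by masking and shifting lanes back into a word. The same arithmetic on scalars, shown for the int16 case (int8 is the same idea with four lanes per word):

    #include <cstdint>

    // Word32Sar + SignExtendWord16ToInt32, per lane.
    void Int32ToInt16Lanes(int32_t word, int16_t out[2]) {
      for (int j = 0; j < 2; ++j) {
        out[j] = static_cast<int16_t>(word >> (j * 16));
      }
    }

    // Word32And (lane mask) + Word32Shl + Word32Or, per lane.
    int32_t Int16LanesToInt32(const int16_t in[2]) {
      uint32_t word = 0;
      for (int j = 0; j < 2; ++j) {
        word |= (static_cast<uint32_t>(in[j]) & 0xFFFFu) << (j * 16);
      }
      return static_cast<int32_t>(word);
    }
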
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
@@ -1498,7 +1523,9 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
if (ReplacementType(node) == SimdType::kFloat32x4) {
Float32ToInt32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
- UNIMPLEMENTED();
+ SmallerIntToInt32<int16_t>(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt8x16) {
+ SmallerIntToInt32<int8_t>(replacements, result);
} else {
UNREACHABLE();
}
@@ -1511,12 +1538,19 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
UNREACHABLE();
}
} else if (type == SimdType::kInt16x8) {
- if (ReplacementType(node) == SimdType::kInt32x4 ||
- ReplacementType(node) == SimdType::kFloat32x4) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToSmallerInt<int16_t>(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kFloat32x4) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
+ } else if (type == SimdType::kInt8x16) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToSmallerInt<int8_t>(replacements, result);
+ } else {
+ UNIMPLEMENTED();
+ }
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 9bb6e79cbe..01ea195bdc 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -68,6 +68,10 @@ class SimdScalarLowering {
int ReplacementCount(Node* node);
void Float32ToInt32(Node** replacements, Node** result);
void Int32ToFloat32(Node** replacements, Node** result);
+ template <typename T>
+ void Int32ToSmallerInt(Node** replacements, Node** result);
+ template <typename T>
+ void SmallerIntToInt32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 74bb7fcd6b..2d82fc99bc 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -286,7 +286,7 @@ class RepresentationSelector {
bool weakened_ = false;
};
- RepresentationSelector(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
Zone* zone, RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins)
@@ -1563,6 +1563,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
@@ -2972,7 +2973,8 @@ class RepresentationSelector {
if (input_type.Is(Type::Number())) {
VisitNoop(node, truncation);
} else {
- CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+ CheckFloat64HoleMode mode =
+ CheckFloat64HoleParametersOf(node->op()).mode();
switch (mode) {
case CheckFloat64HoleMode::kAllowReturnHole:
if (truncation.IsUnused()) return VisitUnused(node);
@@ -3274,8 +3276,7 @@ class RepresentationSelector {
};
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker,
- Zone* zone,
+ JSHeapBroker* js_heap_broker, Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level)
@@ -3297,6 +3298,7 @@ void SimplifiedLowering::LowerAllNodes() {
void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* node, RepresentationSelector* selector) {
DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
@@ -3320,11 +3322,17 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* efalse0 = effect;
Node* vfalse0;
{
- Operator const* op = node->opcode() == IrOpcode::kJSToNumber
- ? ToNumberOperator()
- : ToNumericOperator();
- Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
- : ToNumericCode();
+ Operator const* op =
+ node->opcode() == IrOpcode::kJSToNumber
+ ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntOperator()
+ : ToNumberOperator())
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberCode()
+ : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntCode()
+ : ToNumericCode());
vfalse0 = efalse0 = if_false0 = graph()->NewNode(
op, code, value, context, frame_state, efalse0, if_false0);
@@ -3392,6 +3400,7 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* node, RepresentationSelector* selector) {
DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
@@ -3412,11 +3421,17 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* efalse0 = effect;
Node* vfalse0;
{
- Operator const* op = node->opcode() == IrOpcode::kJSToNumber
- ? ToNumberOperator()
- : ToNumericOperator();
- Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
- : ToNumericCode();
+ Operator const* op =
+ node->opcode() == IrOpcode::kJSToNumber
+ ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntOperator()
+ : ToNumberOperator())
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberCode()
+ : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntCode()
+ : ToNumericCode());
vfalse0 = efalse0 = if_false0 = graph()->NewNode(
op, code, value, context, frame_state, efalse0, if_false0);
@@ -3922,6 +3937,16 @@ Node* SimplifiedLowering::ToNumberCode() {
return to_number_code_.get();
}
+Node* SimplifiedLowering::ToNumberConvertBigIntCode() {
+ if (!to_number_convert_big_int_code_.is_set()) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kToNumberConvertBigInt);
+ to_number_convert_big_int_code_.set(
+ jsgraph()->HeapConstant(callable.code()));
+ }
+ return to_number_convert_big_int_code_.get();
+}
+
Node* SimplifiedLowering::ToNumericCode() {
if (!to_numeric_code_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
@@ -3942,6 +3967,19 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
return to_number_operator_.get();
}
+Operator const* SimplifiedLowering::ToNumberConvertBigIntOperator() {
+ if (!to_number_convert_big_int_operator_.is_set()) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kToNumberConvertBigInt);
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
+ 0, flags, Operator::kNoProperties);
+ to_number_convert_big_int_operator_.set(common()->Call(call_descriptor));
+ }
+ return to_number_convert_big_int_operator_.get();
+}
+
Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
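
Both the code constant and the call operator for ToNumberConvertBigInt are created lazily and cached in SetOncePointer fields, mirroring the existing ToNumber and ToNumeric caches. A simplified sketch of such a create-once holder; V8's SetOncePointer has essentially this shape, but the class below is only illustrative:

    #include <cassert>

    template <typename T>
    class SetOnceSketch {
     public:
      bool is_set() const { return ptr_ != nullptr; }
      void set(T* value) {
        assert(ptr_ == nullptr);  // may only be initialized once
        ptr_ = value;
      }
      T* get() const {
        assert(ptr_ != nullptr);  // must be initialized before use
        return ptr_;
      }

     private:
      T* ptr_ = nullptr;
    };
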
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 86ac8c75ab..7b21b07813 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -23,8 +23,8 @@ class TypeCache;
class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
- SimplifiedLowering(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
- Zone* zone, SourcePositionTable* source_position,
+ SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Zone* zone,
+ SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level);
~SimplifiedLowering() {}
@@ -48,12 +48,14 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Zone* const zone_;
TypeCache const& type_cache_;
SetOncePointer<Node> to_number_code_;
+ SetOncePointer<Node> to_number_convert_big_int_code_;
SetOncePointer<Node> to_numeric_code_;
SetOncePointer<Operator const> to_number_operator_;
+ SetOncePointer<Operator const> to_number_convert_big_int_operator_;
SetOncePointer<Operator const> to_numeric_operator_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
@@ -76,8 +78,10 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
Node* Uint32Mod(Node* const node);
Node* ToNumberCode();
+ Node* ToNumberConvertBigIntCode();
Node* ToNumericCode();
Operator const* ToNumberOperator();
+ Operator const* ToNumberConvertBigIntOperator();
Operator const* ToNumericOperator();
friend class RepresentationSelector;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 34be9cb0e4..ce7d18f34a 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -33,7 +33,7 @@ Decision DecideObjectIsSmi(Node* const input) {
} // namespace
SimplifiedOperatorReducer::SimplifiedOperatorReducer(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index af827a2788..93104e31b0 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~SimplifiedOperatorReducer() final;
const char* reducer_name() const override {
@@ -56,10 +56,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 32aafa33d4..0c331bce5e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -166,9 +166,31 @@ std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
UNREACHABLE();
}
-CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
+CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(
+ Operator const* op) {
DCHECK_EQ(IrOpcode::kCheckFloat64Hole, op->opcode());
- return OpParameter<CheckFloat64HoleMode>(op);
+ return OpParameter<CheckFloat64HoleParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CheckFloat64HoleParameters const& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) os << "; " << params.feedback();
+ return os;
+}
+
+size_t hash_value(const CheckFloat64HoleParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckFloat64HoleParameters const& lhs,
+ CheckFloat64HoleParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(CheckFloat64HoleParameters const& lhs,
+ CheckFloat64HoleParameters const& rhs) {
+ return !(lhs == rhs);
}
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
@@ -1005,12 +1027,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
- : public Operator1<CheckFloat64HoleMode> {
+ : public Operator1<CheckFloat64HoleParameters> {
CheckFloat64HoleNaNOperator()
- : Operator1<CheckFloat64HoleMode>(
+ : Operator1<CheckFloat64HoleParameters>(
IrOpcode::kCheckFloat64Hole,
Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
- 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 0,
+ CheckFloat64HoleParameters(kMode, VectorSlotPair())) {}
};
CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
kCheckFloat64HoleAllowReturnHoleOperator;
@@ -1289,14 +1312,20 @@ const Operator* SimplifiedOperatorBuilder::ConvertReceiver(
}
const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
- CheckFloat64HoleMode mode) {
- switch (mode) {
- case CheckFloat64HoleMode::kAllowReturnHole:
- return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
- case CheckFloat64HoleMode::kNeverReturnHole:
- return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ CheckFloat64HoleMode mode, VectorSlotPair const& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ }
+ UNREACHABLE();
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckFloat64HoleParameters>(
+ IrOpcode::kCheckFloat64Hole, Operator::kFoldable | Operator::kNoThrow,
+ "CheckFloat64Hole", 1, 1, 1, 1, 1, 0,
+ CheckFloat64HoleParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 1708b0e06e..df44e899cd 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/machine-type.h"
+#include "src/maybe-handles.h"
#include "src/objects.h"
#include "src/type-hints.h"
#include "src/vector-slot-pair.h"
@@ -23,7 +24,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
-enum class AbortReason;
+enum class AbortReason : uint8_t;
class Zone;
namespace compiler {
@@ -194,9 +195,32 @@ size_t hash_value(CheckFloat64HoleMode);
std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
-CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*)
+class CheckFloat64HoleParameters {
+ public:
+ CheckFloat64HoleParameters(CheckFloat64HoleMode mode,
+ VectorSlotPair const& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckFloat64HoleMode mode() const { return mode_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ CheckFloat64HoleMode mode_;
+ VectorSlotPair feedback_;
+};
+
+CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+std::ostream& operator<<(std::ostream&, CheckFloat64HoleParameters const&);
+
+size_t hash_value(CheckFloat64HoleParameters const&);
+
+bool operator==(CheckFloat64HoleParameters const&,
+ CheckFloat64HoleParameters const&);
+bool operator!=(CheckFloat64HoleParameters const&,
+ CheckFloat64HoleParameters const&);
+
enum class CheckTaggedInputMode : uint8_t {
kNumber,
kNumberOrOddball,
@@ -640,7 +664,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckBounds(const VectorSlotPair& feedback);
const Operator* CheckEqualsInternalizedString();
const Operator* CheckEqualsSymbol();
- const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode, VectorSlotPair const&);
const Operator* CheckHeapObject();
const Operator* CheckIf(DeoptimizeReason deoptimize_reason,
const VectorSlotPair& feedback = VectorSlotPair());
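The change above packages the hole-check mode together with a feedback slot into a parameter class that is equality-comparable, hashable, and printable, so parameterized operators can still be compared and cached. Below is a minimal standalone sketch of that pattern; the Mode and Feedback types are illustrative stand-ins, not V8's Operator machinery.

    // Sketch of the parameters-object pattern used above (stand-in types only).
    #include <cstddef>
    #include <functional>
    #include <iostream>

    enum class Mode { kAllowReturnHole, kNeverReturnHole };

    struct Feedback {
      int slot = -1;                              // -1 means "no feedback"
      bool IsValid() const { return slot >= 0; }
    };

    inline bool operator==(const Feedback& a, const Feedback& b) {
      return a.slot == b.slot;
    }

    class CheckParameters {
     public:
      CheckParameters(Mode mode, Feedback feedback)
          : mode_(mode), feedback_(feedback) {}
      Mode mode() const { return mode_; }
      const Feedback& feedback() const { return feedback_; }

     private:
      Mode mode_;
      Feedback feedback_;
    };

    inline bool operator==(const CheckParameters& l, const CheckParameters& r) {
      return l.mode() == r.mode() && l.feedback() == r.feedback();
    }

    inline size_t hash_value(const CheckParameters& p) {
      // Combine both fields, analogous in spirit to base::hash_combine.
      size_t h = std::hash<int>()(static_cast<int>(p.mode()));
      return h ^ (std::hash<int>()(p.feedback().slot) + 0x9e3779b9u + (h << 6) + (h >> 2));
    }

    inline std::ostream& operator<<(std::ostream& os, const CheckParameters& p) {
      os << (p.mode() == Mode::kAllowReturnHole ? "AllowReturnHole"
                                                : "NeverReturnHole");
      if (p.feedback().IsValid()) os << "; slot " << p.feedback().slot;
      return os;
    }

    int main() {
      CheckParameters a(Mode::kAllowReturnHole, Feedback{});
      CheckParameters b(Mode::kAllowReturnHole, Feedback{3});
      std::cout << a << "\n" << b << "\n" << (a == b) << "\n";
    }

Note how the non-cached path in CheckFloat64Hole above only allocates a fresh operator when feedback is attached; the feedback-free operators remain shared singletons from the global cache.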
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index 1b8b5b4657..01afdcb911 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -12,7 +12,7 @@ namespace internal {
namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker)
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
op_typer_(jsgraph->isolate(), js_heap_broker, zone()) {}
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 77cb07e772..62237ccce3 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -20,7 +20,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~TypeNarrowingReducer() final;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 0c001117de..b77fc97859 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -21,7 +21,7 @@ namespace compiler {
TypedOptimization::TypedOptimization(Editor* editor,
CompilationDependencies* dependencies,
JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker)
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
@@ -89,8 +89,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
-base::Optional<MapRef> GetStableMapFromObjectType(
- const JSHeapBroker* js_heap_broker, Type object_type) {
+base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* js_heap_broker,
+ Type object_type) {
if (object_type.IsHeapConstant()) {
HeapObjectRef object = object_type.AsHeapConstant()->Ref();
MapRef object_map = object.map();
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 3c4b6ed9cd..baee65dd4e 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, const JSHeapBroker* js_heap_broker);
+ JSGraph* jsgraph, JSHeapBroker* js_heap_broker);
~TypedOptimization();
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -71,11 +71,11 @@ class V8_EXPORT_PRIVATE TypedOptimization final
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Type const true_type_;
Type const false_type_;
TypeCache const& type_cache_;
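The signature changes in this and the neighbouring reducer files drop `const` from the JSHeapBroker pointer. The diff itself does not state why; a plausible reading is that broker lookups now populate internal state, which cannot happen through a pointer-to-const. A hedged standalone illustration of that constraint (Broker here is a stand-in, not V8's class):

    // Illustrative only: a broker whose lookups memoize results must expose
    // non-const methods, so callers cannot hold it as `const Broker*`.
    #include <iostream>
    #include <map>
    #include <string>

    class Broker {
     public:
      // Non-const: the first lookup of a key inserts it into the cache.
      int GetData(const std::string& key) {
        auto it = cache_.find(key);
        if (it != cache_.end()) return it->second;
        int value = static_cast<int>(key.size());  // stand-in for real work
        cache_.emplace(key, value);
        return value;
      }

     private:
      std::map<std::string, int> cache_;
    };

    int main() {
      Broker broker;
      Broker* mutable_broker = &broker;         // fine
      // const Broker* const_broker = &broker;  // GetData() would not compile
      std::cout << mutable_broker->GetData("feedback") << "\n";
    }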
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 575d4aa893..7627d27b08 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -33,7 +33,7 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+Typer::Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
Graph* graph)
: flags_(flags),
graph_(graph),
@@ -64,7 +64,6 @@ class Typer::Visitor : public Reducer {
const char* reducer_name() const override { return "Typer"; }
Reduction Reduce(Node* node) override {
- DisallowHeapAccess no_heap_access;
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
#define DECLARE_CASE(x) \
@@ -266,6 +265,7 @@ class Typer::Visitor : public Reducer {
static Type ToLength(Type, Typer*);
static Type ToName(Type, Typer*);
static Type ToNumber(Type, Typer*);
+ static Type ToNumberConvertBigInt(Type, Typer*);
static Type ToNumeric(Type, Typer*);
static Type ToObject(Type, Typer*);
static Type ToString(Type, Typer*);
@@ -530,6 +530,11 @@ Type Typer::Visitor::ToNumber(Type type, Typer* t) {
}
// static
+Type Typer::Visitor::ToNumberConvertBigInt(Type type, Typer* t) {
+ return t->operation_typer_.ToNumberConvertBigInt(type);
+}
+
+// static
Type Typer::Visitor::ToNumeric(Type type, Typer* t) {
return t->operation_typer_.ToNumeric(type);
}
@@ -1125,6 +1130,10 @@ Type Typer::Visitor::TypeJSToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+Type Typer::Visitor::TypeJSToNumberConvertBigInt(Node* node) {
+ return TypeUnaryOp(node, ToNumberConvertBigInt);
+}
+
Type Typer::Visitor::TypeJSToNumeric(Node* node) {
return TypeUnaryOp(node, ToNumeric);
}
@@ -1214,6 +1223,10 @@ Type Typer::Visitor::TypeJSCreateEmptyLiteralObject(Node* node) {
return Type::OtherObject();
}
+Type Typer::Visitor::TypeJSCloneObject(Node* node) {
+ return Type::OtherObject();
+}
+
Type Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
return Type::OtherObject();
}
@@ -1406,270 +1419,270 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
return Type::NonInternal();
}
JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.HasBuiltinFunctionId()) {
+ if (!function.shared().HasBuiltinFunctionId()) {
return Type::NonInternal();
}
- switch (function.GetBuiltinFunctionId()) {
- case kMathRandom:
+ switch (function.shared().builtin_function_id()) {
+ case BuiltinFunctionId::kMathRandom:
return Type::PlainNumber();
- case kMathFloor:
- case kMathCeil:
- case kMathRound:
- case kMathTrunc:
+ case BuiltinFunctionId::kMathFloor:
+ case BuiltinFunctionId::kMathCeil:
+ case BuiltinFunctionId::kMathRound:
+ case BuiltinFunctionId::kMathTrunc:
return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
- case kMathAbs:
- case kMathExp:
- case kMathExpm1:
+ case BuiltinFunctionId::kMathAbs:
+ case BuiltinFunctionId::kMathExp:
+ case BuiltinFunctionId::kMathExpm1:
return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCos:
- case kMathFround:
- case kMathLog:
- case kMathLog1p:
- case kMathLog10:
- case kMathLog2:
- case kMathSin:
- case kMathSqrt:
- case kMathTan:
+ case BuiltinFunctionId::kMathAcos:
+ case BuiltinFunctionId::kMathAcosh:
+ case BuiltinFunctionId::kMathAsin:
+ case BuiltinFunctionId::kMathAsinh:
+ case BuiltinFunctionId::kMathAtan:
+ case BuiltinFunctionId::kMathAtanh:
+ case BuiltinFunctionId::kMathCbrt:
+ case BuiltinFunctionId::kMathCos:
+ case BuiltinFunctionId::kMathFround:
+ case BuiltinFunctionId::kMathLog:
+ case BuiltinFunctionId::kMathLog1p:
+ case BuiltinFunctionId::kMathLog10:
+ case BuiltinFunctionId::kMathLog2:
+ case BuiltinFunctionId::kMathSin:
+ case BuiltinFunctionId::kMathSqrt:
+ case BuiltinFunctionId::kMathTan:
return Type::Number();
- case kMathSign:
+ case BuiltinFunctionId::kMathSign:
return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
// Binary math functions.
- case kMathAtan2:
- case kMathPow:
- case kMathMax:
- case kMathMin:
+ case BuiltinFunctionId::kMathAtan2:
+ case BuiltinFunctionId::kMathPow:
+ case BuiltinFunctionId::kMathMax:
+ case BuiltinFunctionId::kMathMin:
return Type::Number();
- case kMathImul:
+ case BuiltinFunctionId::kMathImul:
return Type::Signed32();
- case kMathClz32:
+ case BuiltinFunctionId::kMathClz32:
return t->cache_.kZeroToThirtyTwo;
// Date functions.
- case kDateNow:
+ case BuiltinFunctionId::kDateNow:
return t->cache_.kTimeValueType;
- case kDateGetDate:
+ case BuiltinFunctionId::kDateGetDate:
return t->cache_.kJSDateDayType;
- case kDateGetDay:
+ case BuiltinFunctionId::kDateGetDay:
return t->cache_.kJSDateWeekdayType;
- case kDateGetFullYear:
+ case BuiltinFunctionId::kDateGetFullYear:
return t->cache_.kJSDateYearType;
- case kDateGetHours:
+ case BuiltinFunctionId::kDateGetHours:
return t->cache_.kJSDateHourType;
- case kDateGetMilliseconds:
+ case BuiltinFunctionId::kDateGetMilliseconds:
return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
t->zone());
- case kDateGetMinutes:
+ case BuiltinFunctionId::kDateGetMinutes:
return t->cache_.kJSDateMinuteType;
- case kDateGetMonth:
+ case BuiltinFunctionId::kDateGetMonth:
return t->cache_.kJSDateMonthType;
- case kDateGetSeconds:
+ case BuiltinFunctionId::kDateGetSeconds:
return t->cache_.kJSDateSecondType;
- case kDateGetTime:
+ case BuiltinFunctionId::kDateGetTime:
return t->cache_.kJSDateValueType;
// Symbol functions.
- case kSymbolConstructor:
+ case BuiltinFunctionId::kSymbolConstructor:
return Type::Symbol();
// BigInt functions.
- case kBigIntConstructor:
+ case BuiltinFunctionId::kBigIntConstructor:
return Type::BigInt();
// Number functions.
- case kNumberConstructor:
+ case BuiltinFunctionId::kNumberConstructor:
return Type::Number();
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
+ case BuiltinFunctionId::kNumberIsFinite:
+ case BuiltinFunctionId::kNumberIsInteger:
+ case BuiltinFunctionId::kNumberIsNaN:
+ case BuiltinFunctionId::kNumberIsSafeInteger:
return Type::Boolean();
- case kNumberParseFloat:
+ case BuiltinFunctionId::kNumberParseFloat:
return Type::Number();
- case kNumberParseInt:
+ case BuiltinFunctionId::kNumberParseInt:
return t->cache_.kIntegerOrMinusZeroOrNaN;
- case kNumberToString:
+ case BuiltinFunctionId::kNumberToString:
return Type::String();
// String functions.
- case kStringConstructor:
+ case BuiltinFunctionId::kStringConstructor:
return Type::String();
- case kStringCharCodeAt:
+ case BuiltinFunctionId::kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
- case kStringCharAt:
+ case BuiltinFunctionId::kStringCharAt:
return Type::String();
- case kStringCodePointAt:
+ case BuiltinFunctionId::kStringCodePointAt:
return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
Type::Undefined(), t->zone());
- case kStringConcat:
- case kStringFromCharCode:
- case kStringFromCodePoint:
+ case BuiltinFunctionId::kStringConcat:
+ case BuiltinFunctionId::kStringFromCharCode:
+ case BuiltinFunctionId::kStringFromCodePoint:
return Type::String();
- case kStringIndexOf:
- case kStringLastIndexOf:
+ case BuiltinFunctionId::kStringIndexOf:
+ case BuiltinFunctionId::kStringLastIndexOf:
return Type::Range(-1.0, String::kMaxLength, t->zone());
- case kStringEndsWith:
- case kStringIncludes:
+ case BuiltinFunctionId::kStringEndsWith:
+ case BuiltinFunctionId::kStringIncludes:
return Type::Boolean();
- case kStringRaw:
- case kStringRepeat:
- case kStringSlice:
+ case BuiltinFunctionId::kStringRaw:
+ case BuiltinFunctionId::kStringRepeat:
+ case BuiltinFunctionId::kStringSlice:
return Type::String();
- case kStringStartsWith:
+ case BuiltinFunctionId::kStringStartsWith:
return Type::Boolean();
- case kStringSubstr:
- case kStringSubstring:
- case kStringToLowerCase:
- case kStringToString:
- case kStringToUpperCase:
- case kStringTrim:
- case kStringTrimEnd:
- case kStringTrimStart:
- case kStringValueOf:
+ case BuiltinFunctionId::kStringSubstr:
+ case BuiltinFunctionId::kStringSubstring:
+ case BuiltinFunctionId::kStringToLowerCase:
+ case BuiltinFunctionId::kStringToString:
+ case BuiltinFunctionId::kStringToUpperCase:
+ case BuiltinFunctionId::kStringTrim:
+ case BuiltinFunctionId::kStringTrimEnd:
+ case BuiltinFunctionId::kStringTrimStart:
+ case BuiltinFunctionId::kStringValueOf:
return Type::String();
- case kStringIterator:
- case kStringIteratorNext:
+ case BuiltinFunctionId::kStringIterator:
+ case BuiltinFunctionId::kStringIteratorNext:
return Type::OtherObject();
- case kArrayEntries:
- case kArrayKeys:
- case kArrayValues:
- case kTypedArrayEntries:
- case kTypedArrayKeys:
- case kTypedArrayValues:
- case kArrayIteratorNext:
- case kMapIteratorNext:
- case kSetIteratorNext:
+ case BuiltinFunctionId::kArrayEntries:
+ case BuiltinFunctionId::kArrayKeys:
+ case BuiltinFunctionId::kArrayValues:
+ case BuiltinFunctionId::kTypedArrayEntries:
+ case BuiltinFunctionId::kTypedArrayKeys:
+ case BuiltinFunctionId::kTypedArrayValues:
+ case BuiltinFunctionId::kArrayIteratorNext:
+ case BuiltinFunctionId::kMapIteratorNext:
+ case BuiltinFunctionId::kSetIteratorNext:
return Type::OtherObject();
- case kTypedArrayToStringTag:
+ case BuiltinFunctionId::kTypedArrayToStringTag:
return Type::Union(Type::InternalizedString(), Type::Undefined(),
t->zone());
// Array functions.
- case kArrayIsArray:
+ case BuiltinFunctionId::kArrayIsArray:
return Type::Boolean();
- case kArrayConcat:
+ case BuiltinFunctionId::kArrayConcat:
return Type::Receiver();
- case kArrayEvery:
+ case BuiltinFunctionId::kArrayEvery:
return Type::Boolean();
- case kArrayFill:
- case kArrayFilter:
+ case BuiltinFunctionId::kArrayFill:
+ case BuiltinFunctionId::kArrayFilter:
return Type::Receiver();
- case kArrayFindIndex:
+ case BuiltinFunctionId::kArrayFindIndex:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayForEach:
+ case BuiltinFunctionId::kArrayForEach:
return Type::Undefined();
- case kArrayIncludes:
+ case BuiltinFunctionId::kArrayIncludes:
return Type::Boolean();
- case kArrayIndexOf:
+ case BuiltinFunctionId::kArrayIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayJoin:
+ case BuiltinFunctionId::kArrayJoin:
return Type::String();
- case kArrayLastIndexOf:
+ case BuiltinFunctionId::kArrayLastIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayMap:
+ case BuiltinFunctionId::kArrayMap:
return Type::Receiver();
- case kArrayPush:
+ case BuiltinFunctionId::kArrayPush:
return t->cache_.kPositiveSafeInteger;
- case kArrayReverse:
- case kArraySlice:
+ case BuiltinFunctionId::kArrayReverse:
+ case BuiltinFunctionId::kArraySlice:
return Type::Receiver();
- case kArraySome:
+ case BuiltinFunctionId::kArraySome:
return Type::Boolean();
- case kArraySplice:
+ case BuiltinFunctionId::kArraySplice:
return Type::Receiver();
- case kArrayUnshift:
+ case BuiltinFunctionId::kArrayUnshift:
return t->cache_.kPositiveSafeInteger;
// ArrayBuffer functions.
- case kArrayBufferIsView:
+ case BuiltinFunctionId::kArrayBufferIsView:
return Type::Boolean();
// Object functions.
- case kObjectAssign:
+ case BuiltinFunctionId::kObjectAssign:
return Type::Receiver();
- case kObjectCreate:
+ case BuiltinFunctionId::kObjectCreate:
return Type::OtherObject();
- case kObjectIs:
- case kObjectHasOwnProperty:
- case kObjectIsPrototypeOf:
+ case BuiltinFunctionId::kObjectIs:
+ case BuiltinFunctionId::kObjectHasOwnProperty:
+ case BuiltinFunctionId::kObjectIsPrototypeOf:
return Type::Boolean();
- case kObjectToString:
+ case BuiltinFunctionId::kObjectToString:
return Type::String();
// RegExp functions.
- case kRegExpCompile:
+ case BuiltinFunctionId::kRegExpCompile:
return Type::OtherObject();
- case kRegExpExec:
+ case BuiltinFunctionId::kRegExpExec:
return Type::Union(Type::Array(), Type::Null(), t->zone());
- case kRegExpTest:
+ case BuiltinFunctionId::kRegExpTest:
return Type::Boolean();
- case kRegExpToString:
+ case BuiltinFunctionId::kRegExpToString:
return Type::String();
// Function functions.
- case kFunctionBind:
+ case BuiltinFunctionId::kFunctionBind:
return Type::BoundFunction();
- case kFunctionHasInstance:
+ case BuiltinFunctionId::kFunctionHasInstance:
return Type::Boolean();
// Global functions.
- case kGlobalDecodeURI:
- case kGlobalDecodeURIComponent:
- case kGlobalEncodeURI:
- case kGlobalEncodeURIComponent:
- case kGlobalEscape:
- case kGlobalUnescape:
+ case BuiltinFunctionId::kGlobalDecodeURI:
+ case BuiltinFunctionId::kGlobalDecodeURIComponent:
+ case BuiltinFunctionId::kGlobalEncodeURI:
+ case BuiltinFunctionId::kGlobalEncodeURIComponent:
+ case BuiltinFunctionId::kGlobalEscape:
+ case BuiltinFunctionId::kGlobalUnescape:
return Type::String();
- case kGlobalIsFinite:
- case kGlobalIsNaN:
+ case BuiltinFunctionId::kGlobalIsFinite:
+ case BuiltinFunctionId::kGlobalIsNaN:
return Type::Boolean();
// Map functions.
- case kMapClear:
- case kMapForEach:
+ case BuiltinFunctionId::kMapClear:
+ case BuiltinFunctionId::kMapForEach:
return Type::Undefined();
- case kMapDelete:
- case kMapHas:
+ case BuiltinFunctionId::kMapDelete:
+ case BuiltinFunctionId::kMapHas:
return Type::Boolean();
- case kMapEntries:
- case kMapKeys:
- case kMapSet:
- case kMapValues:
+ case BuiltinFunctionId::kMapEntries:
+ case BuiltinFunctionId::kMapKeys:
+ case BuiltinFunctionId::kMapSet:
+ case BuiltinFunctionId::kMapValues:
return Type::OtherObject();
// Set functions.
- case kSetAdd:
- case kSetEntries:
- case kSetValues:
+ case BuiltinFunctionId::kSetAdd:
+ case BuiltinFunctionId::kSetEntries:
+ case BuiltinFunctionId::kSetValues:
return Type::OtherObject();
- case kSetClear:
- case kSetForEach:
+ case BuiltinFunctionId::kSetClear:
+ case BuiltinFunctionId::kSetForEach:
return Type::Undefined();
- case kSetDelete:
- case kSetHas:
+ case BuiltinFunctionId::kSetDelete:
+ case BuiltinFunctionId::kSetHas:
return Type::Boolean();
// WeakMap functions.
- case kWeakMapDelete:
- case kWeakMapHas:
+ case BuiltinFunctionId::kWeakMapDelete:
+ case BuiltinFunctionId::kWeakMapHas:
return Type::Boolean();
- case kWeakMapSet:
+ case BuiltinFunctionId::kWeakMapSet:
return Type::OtherObject();
// WeakSet functions.
- case kWeakSetAdd:
+ case BuiltinFunctionId::kWeakSetAdd:
return Type::OtherObject();
- case kWeakSetDelete:
- case kWeakSetHas:
+ case BuiltinFunctionId::kWeakSetDelete:
+ case BuiltinFunctionId::kWeakSetHas:
return Type::Boolean();
default:
return Type::NonInternal();
@@ -2034,8 +2047,8 @@ Type Typer::Visitor::TypeLoadElement(Node* node) {
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
-#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
- case kExternal##ElemType##Array: \
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
+ case kExternal##ElemType##Array: \
return typer_->cache_.k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2045,8 +2058,8 @@ Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
-#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
- case kExternal##ElemType##Array: \
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
+ case kExternal##ElemType##Array: \
return typer_->cache_.k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
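The long JSCallTyper hunk above is mechanical: every case label gains a `BuiltinFunctionId::` qualifier because the builtin-id enum is used as a scoped enum, and the id is read from the shared function info rather than from the JSFunction directly. A minimal sketch of the scoped-versus-unscoped difference (hypothetical enumerators, unrelated to V8's list):

    // Unscoped enumerators leak into the enclosing scope; scoped ones must be
    // qualified at every use, e.g. in switch cases.
    #include <cstdint>
    #include <iostream>

    enum UnscopedId { kOldFoo, kOldBar };           // usable as plain kOldFoo
    enum class ScopedId : uint8_t { kFoo, kBar };   // must write ScopedId::kFoo

    const char* Describe(ScopedId id) {
      switch (id) {
        case ScopedId::kFoo:   // qualifier is mandatory for a scoped enum
          return "foo";
        case ScopedId::kBar:
          return "bar";
      }
      return "unknown";
    }

    int main() {
      std::cout << Describe(ScopedId::kBar) << "\n";  // bar
      std::cout << kOldFoo << "\n";                   // 0, no qualifier needed
    }

The `enum class AbortReason : uint8_t` forward declaration earlier in this patch spells out the underlying type for a related reason: an opaque enum declaration must agree with the underlying type used at the enum's definition.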
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 1720bc776f..741ca481c2 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE Typer {
};
typedef base::Flags<Flag> Flags;
- Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+ Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
Graph* graph);
~Typer();
@@ -42,13 +42,13 @@ class V8_EXPORT_PRIVATE Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
OperationTyper* operation_typer() { return &operation_typer_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Flags const flags_;
Graph* const graph_;
Decorator* decorator_;
TypeCache const& cache_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
OperationTyper operation_typer_;
Type singleton_false_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 8a5871fdb0..968d788fcc 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -207,7 +207,10 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -273,6 +276,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
@@ -305,7 +309,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
// require bit set types, they should get kOtherInternal.
case MUTABLE_HEAP_NUMBER_TYPE:
case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case FIXED_##TYPE##_ARRAY_TYPE:
TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
@@ -321,14 +325,12 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case PROMISE_REACTION_TYPE:
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
- case WEAK_CELL_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
case INTERPRETER_DATA_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
- case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
case LOAD_HANDLER_TYPE:
@@ -820,22 +822,19 @@ Type Type::NewConstant(double value, Zone* zone) {
return OtherNumberConstant(value, zone);
}
-Type Type::NewConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone) {
- auto maybe_smi = JSHeapBroker::TryGetSmi(value);
- if (maybe_smi.has_value()) {
- return NewConstant(static_cast<double>(maybe_smi.value()), zone);
+Type Type::NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone) {
+ ObjectRef ref(js_heap_broker, value);
+ if (ref.IsSmi()) {
+ return NewConstant(static_cast<double>(ref.AsSmi()), zone);
}
-
- HeapObjectRef heap_ref(js_heap_broker, value);
- if (heap_ref.IsHeapNumber()) {
- return NewConstant(heap_ref.AsHeapNumber().value(), zone);
+ if (ref.IsHeapNumber()) {
+ return NewConstant(ref.AsHeapNumber().value(), zone);
}
-
- if (heap_ref.IsString() && !heap_ref.IsInternalizedString()) {
+ if (ref.IsString() && !ref.IsInternalizedString()) {
return Type::String();
}
- return HeapConstant(js_heap_broker, value, zone);
+ return HeapConstant(ref.AsHeapObject(), zone);
}
Type Type::Union(Type type1, Type type2, Zone* zone) {
@@ -1061,13 +1060,18 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone) {
+Type Type::HeapConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone) {
return FromTypeBase(
HeapConstantType::New(HeapObjectRef(js_heap_broker, value), zone));
}
// static
+Type Type::HeapConstant(const HeapObjectRef& value, Zone* zone) {
+ return HeapConstantType::New(value, zone);
+}
+
+// static
Type Type::Range(double min, double max, Zone* zone) {
return FromTypeBase(RangeType::New(min, max, zone));
}
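Type::NewConstant above now funnels the value through a single ObjectRef and branches on IsSmi / IsHeapNumber / IsString, instead of probing for a Smi first and then building a separate HeapObjectRef. A standalone sketch of that dispatch shape (the Ref wrapper and result strings are stand-ins, not V8's type lattice):

    // Classify a constant through one reference wrapper (illustrative types).
    #include <iostream>
    #include <string>
    #include <variant>

    struct Ref {
      std::variant<int, double, std::string> value;  // smi, heap number, string
      bool IsSmi() const { return std::holds_alternative<int>(value); }
      bool IsHeapNumber() const { return std::holds_alternative<double>(value); }
      bool IsString() const { return std::holds_alternative<std::string>(value); }
      int AsSmi() const { return std::get<int>(value); }
      double AsHeapNumber() const { return std::get<double>(value); }
    };

    std::string NewConstantType(const Ref& ref) {
      if (ref.IsSmi()) return "NumberConstant(" + std::to_string(ref.AsSmi()) + ")";
      if (ref.IsHeapNumber())
        return "NumberConstant(" + std::to_string(ref.AsHeapNumber()) + ")";
      if (ref.IsString()) return "String";  // in V8, only non-internalized strings widen
      return "HeapConstant";
    }

    int main() {
      std::cout << NewConstantType(Ref{42}) << "\n";
      std::cout << NewConstantType(Ref{3.5}) << "\n";
      std::cout << NewConstantType(Ref{std::string("hi")}) << "\n";
    }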
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index fbda845ee2..d27f6e3e75 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -361,22 +361,23 @@ class V8_EXPORT_PRIVATE Type {
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(const JSHeapBroker* js_heap_broker,
+ static Type HeapConstant(JSHeapBroker* js_heap_broker,
Handle<i::Object> value, Zone* zone);
+ static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
static Type Range(RangeType::Limits lims, Zone* zone);
static Type Tuple(Type first, Type second, Type third, Zone* zone);
static Type Union(int length, Zone* zone);
// NewConstant is a factory that returns Constant, Range or Number.
- static Type NewConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone);
+ static Type NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone);
static Type NewConstant(double value, Zone* zone);
static Type Union(Type type1, Type type2, Zone* zone);
static Type Intersect(Type type1, Type type2, Zone* zone);
- static Type For(const JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
+ static Type For(JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
HeapObjectType type = js_heap_broker->HeapObjectTypeFromMap(map);
return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
}
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 52cbd6e0b7..55eaf07711 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -629,6 +629,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Name());
break;
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
// Type is Number.
CheckTypeIs(node, Type::Number());
break;
@@ -716,6 +717,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateEmptyLiteralObject:
+ case IrOpcode::kJSCloneObject:
case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
@@ -1736,6 +1738,22 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64AtomicExchange:
case IrOpcode::kWord64AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairStore:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ case IrOpcode::kWord64AtomicNarrowAdd:
+ case IrOpcode::kWord64AtomicNarrowSub:
+ case IrOpcode::kWord64AtomicNarrowAnd:
+ case IrOpcode::kWord64AtomicNarrowOr:
+ case IrOpcode::kWord64AtomicNarrowXor:
+ case IrOpcode::kWord64AtomicNarrowExchange:
+ case IrOpcode::kWord64AtomicNarrowCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 1b8f4e9066..f544c2eb10 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -70,16 +70,16 @@ namespace compiler {
(WasmInstanceObject::k##name##Offset - kHeapObjectTag)
#define LOAD_INSTANCE_FIELD(name, type) \
- graph()->NewNode( \
+ SetEffect(graph()->NewNode( \
mcgraph()->machine()->Load(type), instance_node_.get(), \
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), *effect_, \
- *control_)
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
+ Control()))
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()), \
- array_node, \
- mcgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), \
- *effect_, *control_);
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()), array_node, \
+ mcgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), Effect(), \
+ Control()))
int FixedArrayOffsetMinusTag(uint32_t index) {
auto access = AccessBuilder::ForFixedArraySlot(index);
@@ -214,9 +214,7 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- Node* null = LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
- *effect_ = null;
- return null;
+ return LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
}
Node* WasmGraphBuilder::NoContextConstant() {
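Most of the churn in this file replaces the `*effect_ = node; return node;` idiom with SetEffect(...) and SetControl(...): the setter records the new effect or control edge and passes the node through, so node creation and chain update compose in one expression (as in the rewritten LOAD_INSTANCE_FIELD macro above). A standalone sketch of the helper shape; Node and Builder are stand-ins, not V8's graph classes:

    // The pass-through setter idiom: record the new effect, return the node.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Node {
      std::string name;
    };

    class Builder {
     public:
      Node* Effect() { return effect_; }
      Node* SetEffect(Node* n) { effect_ = n; return n; }  // record and pass through

      Node* Load(const std::string& what) {
        // Instead of: Node* n = New(...); effect_ = n; return n;
        return SetEffect(New("Load " + what + " after " +
                             (effect_ ? effect_->name : "start")));
      }

     private:
      Node* New(const std::string& name) {
        nodes_.push_back(std::unique_ptr<Node>(new Node{name}));
        return nodes_.back().get();
      }
      std::vector<std::unique_ptr<Node>> nodes_;
      Node* effect_ = nullptr;
    };

    int main() {
      Builder b;
      b.Load("mem_start");
      Node* second = b.Load("mem_size");  // chained after the first load
      std::cout << second->name << "\n";  // "Load mem_size after Load mem_start after start"
    }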
@@ -259,7 +257,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
*effect, *control);
Node* limit = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
- mcgraph()->IntPtrConstant(0), *effect, *control);
+ mcgraph()->IntPtrConstant(0), limit_address, *control);
*effect = limit;
Node* pointer = graph()->NewNode(mcgraph()->machine()->LoadStackPointer());
@@ -893,19 +891,19 @@ Node* Branch(MachineGraph* mcgraph, Node* cond, Node** true_node,
Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kNone);
}
Node* WasmGraphBuilder::BranchExpectTrue(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kTrue);
}
Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kFalse);
}
@@ -936,9 +934,8 @@ TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
- Node* node = graph()->NewNode(mcgraph()->common()->TrapIf(trap_id), cond,
- Effect(), Control());
- *control_ = node;
+ Node* node = SetControl(graph()->NewNode(mcgraph()->common()->TrapIf(trap_id),
+ cond, Effect(), Control()));
SetSourcePosition(node, position);
return node;
}
@@ -946,9 +943,8 @@ Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
Node* WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
- Node* node = graph()->NewNode(mcgraph()->common()->TrapUnless(trap_id), cond,
- Effect(), Control());
- *control_ = node;
+ Node* node = SetControl(graph()->NewNode(
+ mcgraph()->common()->TrapUnless(trap_id), cond, Effect(), Control()));
SetSourcePosition(node, position);
return node;
}
@@ -994,7 +990,7 @@ Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
- return graph()->NewNode(mcgraph()->common()->Switch(count), key, *control_);
+ return graph()->NewNode(mcgraph()->common()->Switch(count), key, Control());
}
Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
@@ -1008,9 +1004,6 @@ Node* WasmGraphBuilder::IfDefault(Node* sw) {
}
Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
- DCHECK_NOT_NULL(*control_);
- DCHECK_NOT_NULL(*effect_);
-
static const int kStackAllocatedNodeBufferSize = 8;
Node* stack_buffer[kStackAllocatedNodeBufferSize];
std::vector<Node*> heap_buffer;
@@ -1023,8 +1016,8 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
buf[0] = mcgraph()->Int32Constant(0);
memcpy(buf + 1, vals, sizeof(void*) * count);
- buf[count + 1] = *effect_;
- buf[count + 2] = *control_;
+ buf[count + 1] = Effect();
+ buf[count + 2] = Control();
Node* ret =
graph()->NewNode(mcgraph()->common()->Return(count), count + 3, buf);
@@ -1077,9 +1070,9 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
switch (size_in_bytes) {
case 4:
case 16:
- return m->Word32ReverseBytes().IsSupported();
+ return true;
case 8:
- return m->Word64ReverseBytes().IsSupported();
+ return m->Is64();
default:
break;
}
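ReverseBytesSupported above no longer queries optional operators: 32-bit byte reversal is always available, and 64-bit reversal is available exactly on 64-bit targets, so the `.op()` unwrapping disappears from the endianness-conversion helpers that follow. For reference, the scalar operation the Word32ReverseBytes node performs is an ordinary 4-byte swap (plain C++, unrelated to the node graph):

    // Byte-swapping a 32-bit value, as used when converting load/store endianness.
    #include <cstdint>
    #include <cstdio>

    uint32_t ReverseBytes32(uint32_t v) {
      return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
             ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
    }

    int main() {
      // Bytes 12 34 56 78 read little-endian give 0x78563412; reversing the
      // bytes recovers the big-endian interpretation 0x12345678.
      std::printf("%08x\n", ReverseBytes32(0x78563412u));  // prints 12345678
    }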
@@ -1144,16 +1137,16 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (ReverseBytesSupported(m, valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word32ReverseBytes(), value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
case 16: {
Node* byte_reversed_lanes[4];
for (int lane = 0; lane < 4; lane++) {
byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes().op(),
+ m->Word32ReverseBytes(),
graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
value));
}
@@ -1279,21 +1272,21 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
switch (valueSizeInBytes) {
case 2:
result =
- graph()->NewNode(m->Word32ReverseBytes().op(),
+ graph()->NewNode(m->Word32ReverseBytes(),
graph()->NewNode(m->Word32Shl(), value,
mcgraph()->Int32Constant(16)));
break;
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word32ReverseBytes(), value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
case 16: {
Node* byte_reversed_lanes[4];
for (int lane = 0; lane < 4; lane++) {
byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes().op(),
+ m->Word32ReverseBytes(),
graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
value));
}
@@ -1749,9 +1742,9 @@ Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(input_type, kNoWriteBarrier));
- *effect_ =
- graph()->NewNode(store_op, stack_slot_param, mcgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot_param,
+ mcgraph()->Int32Constant(0), input, Effect(),
+ Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
@@ -1874,26 +1867,24 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(type.representation(), kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input0, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ input0, Effect(), Control()));
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
if (input1 != nullptr) {
- *effect_ = graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(type_size), input1,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot,
+ mcgraph()->Int32Constant(type_size), input1,
+ Effect(), Control()));
}
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
BuildCCall(&sig, function, stack_slot);
- const Operator* load_op = mcgraph()->machine()->Load(type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
@@ -1930,17 +1921,15 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(parameter_representation, kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ input, Effect(), Control()));
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
BuildCCall(&sig, function, stack_slot);
- const Operator* load_op = mcgraph()->machine()->Load(result_type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
namespace {
@@ -1979,8 +1968,8 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
+ Effect(), Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
Node* function =
@@ -1988,11 +1977,9 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
- const Operator* load_op = mcgraph()->machine()->Load(int_ty);
- Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0),
- *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty),
+ stack_slot, Int32Constant(0), Effect(),
+ Control()));
}
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
@@ -2005,9 +1992,9 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false);
Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
- const Operator* load_op = mcgraph()->machine()->Load(int_ty);
- Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0), *effect_,
- *control_);
+ Node* load =
+ SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty), stack_slot,
+ Int32Constant(0), Effect(), Control()));
Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2028,12 +2015,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
// Just encode the stub index. This will be patched at relocation.
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmGrowMemory, RelocInfo::WASM_STUB_CALL);
- Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, input, *effect_, *control_);
-
- *effect_ = call;
- *control_ = call;
- return call;
+ return SetEffect(
+ SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, input, Effect(), Control())));
}
uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
@@ -2196,21 +2180,21 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = mcgraph()->machine();
ZeroCheck32(wasm::kTrapDivByZero, right, position);
- Node* before = *control_;
+ Node* before = Control();
Node* denom_is_m1;
Node* denom_is_not_m1;
BranchExpectFalse(
graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
+ SetControl(denom_is_m1);
TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- *control_);
+ if (Control() != denom_is_m1) {
+ SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
+ Control()));
} else {
- *control_ = before;
+ SetControl(before);
}
- return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ return graph()->NewNode(m->Int32Div(), left, right, Control());
}
Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
@@ -2223,7 +2207,7 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
graph(), mcgraph()->common(),
graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
BranchHint::kFalse);
- d.Chain(*control_);
+ d.Chain(Control());
return d.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
@@ -2254,7 +2238,7 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
// The result is the negation of the left input.
return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
}
- return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ return graph()->NewNode(m->Int32Div(), left, right, Control());
}
// asm.js semantics return 0 on divide or mod by zero.
@@ -2294,7 +2278,7 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
if (mr.Value() == 0 || mr.Value() == -1) {
return zero;
}
- return graph()->NewNode(m->Int32Mod(), left, right, *control_);
+ return graph()->NewNode(m->Int32Mod(), left, right, Control());
}
// General case for signed integer modulus, with optimization for (unknown)
@@ -2423,23 +2407,23 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
ZeroCheck64(wasm::kTrapDivByZero, right, position);
- Node* before = *control_;
+ Node* before = Control();
Node* denom_is_m1;
Node* denom_is_not_m1;
BranchExpectFalse(graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
mcgraph()->Int64Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
+ SetControl(denom_is_m1);
TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
std::numeric_limits<int64_t>::min(), position);
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- *control_);
+ if (Control() != denom_is_m1) {
+ SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
+ Control()));
} else {
- *control_ = before;
+ SetControl(before);
}
return graph()->NewNode(mcgraph()->machine()->Int64Div(), left, right,
- *control_);
+ Control());
}
Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
@@ -2453,7 +2437,7 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
mcgraph()->Int64Constant(-1)));
- d.Chain(*control_);
+ d.Chain(Control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Int64Mod(), left, right,
d.if_false);
@@ -2491,11 +2475,11 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- left, *effect_, *control_);
- *effect_ = graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(sizeof(double)), right,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ left, Effect(), Control()));
+ SetEffect(graph()->NewNode(store_op, stack_slot,
+ mcgraph()->Int32Constant(sizeof(double)), right,
+ Effect(), Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
@@ -2505,11 +2489,9 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ZeroCheck32(trap_zero, call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
- const Operator* load_op = mcgraph()->machine()->Load(result_type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
template <typename... Args>
@@ -2517,15 +2499,13 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Args... args) {
DCHECK_LE(sig->return_count(), 1);
DCHECK_EQ(sizeof...(args), sig->parameter_count());
- Node* const call_args[] = {function, args..., *effect_, *control_};
+ Node* const call_args[] = {function, args..., Effect(), Control()};
auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = graph()->NewNode(op, arraysize(call_args), call_args);
- *effect_ = call;
- return call;
+ return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
}
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
@@ -2550,17 +2530,16 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
args[1] = instance_node;
// Add effect and control inputs.
- args[params + 2] = *effect_;
- args[params + 3] = *control_;
+ args[params + 2] = Effect();
+ args[params + 3] = Control();
auto call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ Node* call = SetEffect(graph()->NewNode(op, static_cast<int>(count), args));
DCHECK(position == wasm::kNoCodePosition || position > 0);
if (position > 0) SetSourcePosition(call, position);
- *effect_ = call;
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
@@ -2590,10 +2569,10 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
// Load the target from the imported_targets array at a known offset.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = graph()->NewNode(
+ Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * kPointerSize),
- mcgraph()->graph()->start(), mcgraph()->graph()->start());
+ mcgraph()->Int32Constant(func_index * kPointerSize), Effect(),
+ Control()));
args[0] = target_node;
return BuildWasmCall(sig, args, rets, position, instance_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
@@ -2613,19 +2592,18 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
Node* func_index_times_pointersize = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
mcgraph()->Int32Constant(kPointerSize));
- Node* instance_node =
+ Node* instance_node = SetEffect(
graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()),
imported_instances_data, func_index_times_pointersize,
- *effect_, *control_);
+ Effect(), Control()));
// Load the target from the imported_targets array at the offset of
// {func_index}.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = graph()->NewNode(
+ Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- func_index_times_pointersize, mcgraph()->graph()->start(),
- mcgraph()->graph()->start());
+ func_index_times_pointersize, Effect(), Control()));
args[0] = target_node;
return BuildWasmCall(sig, args, rets, position, instance_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
@@ -2686,19 +2664,12 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
- Node* scaled_key =
- graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2));
- const Operator* add = nullptr;
- if (machine->Is64()) {
- scaled_key = graph()->NewNode(machine->ChangeUint32ToUint64(), scaled_key);
- add = machine->Int64Add();
- } else {
- add = machine->Int32Add();
- }
+ Node* scaled_key = Uint32ToUintptr(
+ graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
Node* loaded_sig =
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- scaled_key, *effect_, *control_);
+ SetEffect(graph()->NewNode(machine->Load(MachineType::Int32()),
+ ift_sig_ids, scaled_key, Effect(), Control()));
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
Int32Constant(expected_sig_id));
@@ -2712,14 +2683,15 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
scaled_key = graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2));
- Node* target = graph()->NewNode(machine->Load(MachineType::Pointer()),
- ift_targets, scaled_key, *effect_, *control_);
+ Node* target =
+ SetEffect(graph()->NewNode(machine->Load(MachineType::Pointer()),
+ ift_targets, scaled_key, Effect(), Control()));
auto access = AccessBuilder::ForFixedArrayElement();
- Node* target_instance = graph()->NewNode(
+ Node* target_instance = SetEffect(graph()->NewNode(
machine->Load(MachineType::TaggedPointer()),
- graph()->NewNode(add, ift_instances, scaled_key),
- Int32Constant(access.header_size - access.tag()), *effect_, *control_);
+ graph()->NewNode(machine->IntAdd(), ift_instances, scaled_key),
+ Int32Constant(access.header_size - access.tag()), Effect(), Control()));
args[0] = target;
@@ -2805,33 +2777,26 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
void WasmGraphBuilder::InitInstanceCache(
WasmInstanceCacheNodes* instance_cache) {
DCHECK_NOT_NULL(instance_node_);
- DCHECK_NOT_NULL(*control_);
- DCHECK_NOT_NULL(*effect_);
// Load the memory start.
- Node* mem_start = graph()->NewNode(
+ instance_cache->mem_start = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryStart)),
- *effect_, *control_);
- *effect_ = mem_start;
- instance_cache->mem_start = mem_start;
+ Effect(), Control()));
// Load the memory size.
- Node* mem_size = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ instance_cache->mem_size = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemorySize)),
- *effect_, *control_);
- *effect_ = mem_size;
- instance_cache->mem_size = mem_size;
+ Effect(), Control()));
if (untrusted_code_mitigations_) {
// Load the memory mask.
- Node* mem_mask = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ instance_cache->mem_mask = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::UintPtr()),
+ instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryMask)),
- *effect_, *control_);
- *effect_ = mem_mask;
- instance_cache->mem_mask = mem_mask;
+ Effect(), Control()));
} else {
// Explicitly set to nullptr to ensure a SEGV when we try to use it.
instance_cache->mem_mask = nullptr;
@@ -2874,13 +2839,13 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
WasmInstanceCacheNodes* from,
Node* merge) {
- to->mem_size = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
- to->mem_size, from->mem_size);
+ to->mem_size = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
+ merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
- to->mem_mask, from->mem_mask);
+ to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
+ merge, to->mem_mask, from->mem_mask);
}
}
@@ -2924,7 +2889,6 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
Node** offset_node) {
DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
if (imported_mutable_globals_ == nullptr) {
// Load imported_mutable_globals_ from the instance object at runtime.
imported_mutable_globals_ = graph()->NewNode(
@@ -2934,13 +2898,12 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
WASM_INSTANCE_OBJECT_OFFSET(ImportedMutableGlobals)),
graph()->start(), graph()->start());
}
- *base_node = graph()->NewNode(
+ *base_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
imported_mutable_globals_.get(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)), *effect_,
- *control_);
+ mcgraph()->Int32Constant(global.index * sizeof(Address)), Effect(),
+ Control()));
*offset_node = mcgraph()->Int32Constant(0);
- *effect_ = *base_node;
} else {
if (globals_start_ == nullptr) {
// Load globals_start from the instance object at runtime.
@@ -2985,9 +2948,14 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
- return graph()->NewNode(
- mcgraph()->machine()->Word32Shr(), mem_size,
- mcgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
+ Node* result =
+ graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
+ mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
+ if (mcgraph()->machine()->Is64()) {
+ result =
+ graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result);
+ }
+ return result;
}
// Only call this function for code which is not reused across instantiations,
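CurrentMemoryPages above now reads the pointer-sized memory size, shifts it by the page-size log2 (wasm pages are 64 KiB), and truncates back to 32 bits on 64-bit targets. The arithmetic in isolation, with stand-in values:

    // Converting a wasm memory size in bytes to a page count (64 KiB pages).
    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned kWasmPageSizeLog2 = 16;                 // 64 KiB == 1 << 16
      uintptr_t mem_size_bytes = uintptr_t{5} * 64 * 1024;   // five pages
      uint32_t pages = static_cast<uint32_t>(mem_size_bytes >> kWasmPageSizeLog2);
      std::printf("%u\n", pages);                            // prints 5
    }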
@@ -3003,7 +2971,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
// The CEntryStub is loaded from the instance_node so that generated code is
// Isolate independent. At the moment this is only done for CEntryStub(1).
DCHECK_EQ(1, fun->result_size);
- Node* centry_stub = *effect_ =
+ Node* centry_stub =
LOAD_INSTANCE_FIELD(CEntryStub, MachineType::TaggedPointer());
// At the moment we only allow 4 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3019,14 +2987,11 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
mcgraph()->ExternalConstant(ExternalReference::Create(f)); // ref
inputs[count++] = mcgraph()->Int32Constant(fun->nargs); // arity
inputs[count++] = js_context; // js_context
- inputs[count++] = *effect_;
- inputs[count++] = *control_;
+ inputs[count++] = Effect();
+ inputs[count++] = Control();
- Node* node = mcgraph()->graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), count, inputs);
- *effect_ = node;
-
- return node;
+ return SetEffect(mcgraph()->graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), count, inputs));
}
Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
@@ -3043,10 +3008,13 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
&offset);
- Node* node = graph()->NewNode(mcgraph()->machine()->Load(mem_type), base,
- offset, *effect_, *control_);
- *effect_ = node;
- return node;
+ Node* load = SetEffect(graph()->NewNode(mcgraph()->machine()->Load(mem_type),
+ base, offset, Effect(), Control()));
+#if defined(V8_TARGET_BIG_ENDIAN)
+ load = BuildChangeEndiannessLoad(load, mem_type,
+ env_->module->globals[index].type);
+#endif
+ return load;
}
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
@@ -3058,44 +3026,58 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
&offset);
const Operator* op = mcgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
- Node* node = graph()->NewNode(op, base, offset, val, *effect_, *control_);
- *effect_ = node;
- return node;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ val = BuildChangeEndiannessStore(val, mem_type.representation(),
+ env_->module->globals[index].type);
+#endif
+ return SetEffect(
+ graph()->NewNode(op, base, offset, val, Effect(), Control()));
+}
+
+Node* WasmGraphBuilder::CheckBoundsAndAlignment(
+ uint8_t access_size, Node* index, uint32_t offset,
+ wasm::WasmCodePosition position) {
+  // Atomic operations access the memory and need to be bounds checked until
+  // trap handlers are enabled for atomic operations.
+ index =
+ BoundsCheckMem(access_size, index, offset, position, kNeedsBoundsCheck);
+ Node* effective_address =
+ graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(offset),
+ Uint32ToUintptr(index));
+  // Unlike regular memory accesses, unaligned memory accesses for atomic
+  // operations should trap.
+  // Access sizes are powers of two, so the modulus is computed with a mask
+  // instead of a division.
+ Node* cond =
+ graph()->NewNode(mcgraph()->machine()->WordAnd(), effective_address,
+ IntPtrConstant(access_size - 1));
+ TrapIfFalse(wasm::kTrapUnalignedAccess,
+ graph()->NewNode(mcgraph()->machine()->Word32Equal(), cond,
+ mcgraph()->Int32Constant(0)),
+ position);
+ return index;
}
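
The alignment trap above uses the standard power-of-two trick: for an access size that is a power of two, address % size equals address & (size - 1), so no division is emitted. A self-contained sketch of the predicate the builder constructs in graph form (names here are illustrative, not V8 API):

#include <cassert>
#include <cstdint>

// True if 'address' is aligned to 'access_size'; 'access_size' must be a
// non-zero power of two, as wasm access sizes are.
bool IsAligned(uintptr_t address, uint8_t access_size) {
  assert(access_size != 0 && (access_size & (access_size - 1)) == 0);
  return (address & (access_size - 1)) == 0;
}

int main() {
  assert(IsAligned(0x1000, 8));
  assert(!IsAligned(0x1003, 4));
  return 0;
}
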
Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
- if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
- DCHECK_NOT_NULL(instance_cache_);
- Node* mem_size = instance_cache_->mem_size;
- DCHECK_NOT_NULL(mem_size);
+ DCHECK_LE(1, access_size);
+ index = Uint32ToUintptr(index);
+ if (FLAG_wasm_no_bounds_checks) return index;
- auto m = mcgraph()->machine();
if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
- // Simply zero out the 32-bits on 64-bit targets and let the trap handler
- // do its job.
- return Uint32ToUintptr(index);
+ return index;
}
- uint32_t min_size = env_->module->initial_pages * wasm::kWasmPageSize;
- uint32_t max_size =
- (env_->module->has_maximum_pages ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
- wasm::kWasmPageSize;
-
- if (access_size > max_size || offset > max_size - access_size) {
+ const bool statically_oob = access_size > env_->max_memory_size ||
+ offset > env_->max_memory_size - access_size;
+ if (statically_oob) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return mcgraph()->IntPtrConstant(0);
}
- DCHECK_LE(1, access_size);
- // This computation cannot overflow, since
- // {offset <= max_size - access_size <= kMaxUint32 - access_size}.
- // It also cannot underflow, since {access_size >= 1}.
- uint32_t end_offset = offset + access_size - 1;
- Node* end_offset_node = Int32Constant(end_offset);
+ uint64_t end_offset = uint64_t{offset} + access_size - 1u;
+ Node* end_offset_node = IntPtrConstant(end_offset);
// The accessed memory is [index + offset, index + end_offset].
// Check that the last read byte (at {index + end_offset}) is in bounds.
@@ -3106,42 +3088,42 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// - computing {effective_size} as {mem_size - end_offset} and
// - checking that {index < effective_size}.
- if (end_offset >= min_size) {
+ auto m = mcgraph()->machine();
+ Node* mem_size = instance_cache_->mem_size;
+ if (end_offset >= env_->min_memory_size) {
// The end offset is larger than the smallest memory.
- // Dynamically check the end offset against the actual memory size, which
- // is not known at compile time.
- Node* cond = graph()->NewNode(mcgraph()->machine()->Uint32LessThan(),
- end_offset_node, mem_size);
+ // Dynamically check the end offset against the dynamic memory size.
+ Node* cond = graph()->NewNode(m->UintLessThan(), end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
- // The end offset is within the bounds of the smallest memory, so only
- // one check is required. Check to see if the index is also a constant.
- Uint32Matcher match(index);
+ // The end offset is smaller than the smallest memory, so only one check is
+ // required. Check to see if the index is also a constant.
+ UintPtrMatcher match(index);
if (match.HasValue()) {
- uint32_t index_val = match.Value();
- if (index_val < min_size - end_offset) {
+ uintptr_t index_val = match.Value();
+ if (index_val < env_->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return Uint32ToUintptr(index);
+ return index;
}
}
}
// This produces a positive number, since {end_offset < min_size <= mem_size}.
- Node* effective_size = graph()->NewNode(mcgraph()->machine()->Int32Sub(),
- mem_size, end_offset_node);
+ Node* effective_size =
+ graph()->NewNode(m->IntSub(), mem_size, end_offset_node);
// Introduce the actual bounds check.
- Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
+ Node* cond = graph()->NewNode(m->UintLessThan(), index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
if (untrusted_code_mitigations_) {
// In the fallthrough case, condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(m->Word32And(), index, mem_mask);
+ index = graph()->NewNode(m->WordAnd(), index, mem_mask);
}
- return Uint32ToUintptr(index);
+ return index;
}
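
One detail worth calling out: {offset + access_size - 1} is now computed as a uint64_t, so a large static offset can no longer wrap before the comparison, and the pointer-sized operators (UintLessThan, IntSub, WordAnd) serve 32- and 64-bit targets alike. A plain C++ sketch of the overall bounds predicate (function and parameter names are made up for illustration):

#include <cstdint>

// True if an 'access_size'-byte access at 'index + offset' fits in a memory
// of 'mem_size' bytes. Mirrors the arithmetic in BoundsCheckMem.
bool AccessInBounds(uint64_t index, uint32_t offset, uint8_t access_size,
                    uint64_t mem_size) {
  // Widened to 64 bits so offset + access_size - 1 cannot overflow.
  uint64_t end_offset = uint64_t{offset} + access_size - 1;
  if (end_offset >= mem_size) return false;  // even index 0 would be OOB
  // Equivalent to index + end_offset < mem_size, but free of overflow.
  return index < mem_size - end_offset;
}

int main() {
  // A 4-byte access at the last valid position of a 64-byte memory.
  return AccessInBounds(60, 0, 4, 64) ? 0 : 1;
}
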
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
@@ -3178,9 +3160,9 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Int32Constant(offset), index);
auto store = [&](int offset, MachineRepresentation rep, Node* data) {
- *effect_ = graph()->NewNode(
+ SetEffect(graph()->NewNode(
mcgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
- info, mcgraph()->Int32Constant(offset), data, *effect_, *control_);
+ info, mcgraph()->Int32Constant(offset), data, Effect(), Control()));
};
// Store address, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, address),
@@ -3212,20 +3194,20 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
mcgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler()) {
load = graph()->NewNode(mcgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
SetSourcePosition(load, position);
} else {
load = graph()->NewNode(mcgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler());
load = graph()->NewNode(mcgraph()->machine()->UnalignedLoad(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
}
- *effect_ = load;
+ SetEffect(load);
#if defined(V8_TARGET_BIG_ENDIAN)
load = BuildChangeEndiannessLoad(load, memtype, type);
@@ -3270,13 +3252,13 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
if (use_trap_handler()) {
store =
graph()->NewNode(mcgraph()->machine()->ProtectedStore(mem_rep),
- MemBuffer(offset), index, val, *effect_, *control_);
+ MemBuffer(offset), index, val, Effect(), Control());
SetSourcePosition(store, position);
} else {
StoreRepresentation rep(mem_rep, kNoWriteBarrier);
store =
graph()->NewNode(mcgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
+ index, val, Effect(), Control());
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
@@ -3284,10 +3266,10 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
UnalignedStoreRepresentation rep(mem_rep);
store =
graph()->NewNode(mcgraph()->machine()->UnalignedStore(rep),
- MemBuffer(offset), index, val, *effect_, *control_);
+ MemBuffer(offset), index, val, Effect(), Control());
}
- *effect_ = store;
+ SetEffect(store);
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, mem_rep, index, offset, position);
@@ -3322,42 +3304,38 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
DCHECK_NOT_NULL(mem_start);
DCHECK_NOT_NULL(mem_size);
- // Asm.js semantics are defined along the lines of typed arrays, hence OOB
+ // Asm.js semantics are defined in terms of typed arrays, hence OOB
// reads return {undefined} coerced to the result type (0 for integers, NaN
// for float and double).
// Note that we check against the memory size ignoring the size of the
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
+ index = Uint32ToUintptr(index);
Diamond bounds_check(
graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
+ graph()->NewNode(mcgraph()->machine()->UintLessThan(), index, mem_size),
BranchHint::kTrue);
- bounds_check.Chain(*control_);
+ bounds_check.Chain(Control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index =
- graph()->NewNode(mcgraph()->machine()->Word32And(), index, mem_mask);
+ index = graph()->NewNode(mcgraph()->machine()->WordAnd(), index, mem_mask);
}
- index = Uint32ToUintptr(index);
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
- index, *effect_, bounds_check.if_true);
- Node* value_phi =
- bounds_check.Phi(type.representation(), load,
- GetAsmJsOOBValue(type.representation(), mcgraph()));
- Node* effect_phi = bounds_check.EffectPhi(load, *effect_);
- *effect_ = effect_phi;
- *control_ = bounds_check.merge;
- return value_phi;
+ index, Effect(), bounds_check.if_true);
+ SetEffect(bounds_check.EffectPhi(load, Effect()));
+ SetControl(bounds_check.merge);
+ return bounds_check.Phi(type.representation(), load,
+ GetAsmJsOOBValue(type.representation(), mcgraph()));
}
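
The diamond above encodes asm.js load semantics: an out-of-bounds read does not trap, it produces the default value for the requested type (0 for integers, NaN for floats and doubles). A scalar sketch of that behaviour for a 32-bit load; note the sketch also covers the access width, whereas the builder compares only the index against mem_size since asm.js accesses are expected to be aligned:

#include <cstdint>
#include <cstring>
#include <vector>

// Asm.js-style load: out of bounds yields the type's default value, never a
// trap. Shown for int32; float/double variants would return quiet NaN.
int32_t AsmjsLoadI32(const std::vector<uint8_t>& mem, size_t index) {
  if (index + sizeof(int32_t) > mem.size()) return 0;  // OOB -> 0
  int32_t value;
  std::memcpy(&value, mem.data() + index, sizeof(value));
  return value;
}

int main() {
  std::vector<uint8_t> mem(8, 0xFF);
  return AsmjsLoadI32(mem, 1024) == 0 ? 0 : 1;  // OOB read yields 0
}
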
Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
// Fold instances of ChangeUint32ToUint64(IntConstant) directly.
- UintPtrMatcher matcher(node);
+ Uint32Matcher matcher(node);
if (matcher.HasValue()) {
uintptr_t value = matcher.Value();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
@@ -3381,7 +3359,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
graph(), mcgraph()->common(),
graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
BranchHint::kTrue);
- bounds_check.Chain(*control_);
+ bounds_check.Chain(Control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
@@ -3394,11 +3372,10 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
index = Uint32ToUintptr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
- Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
+ Node* store = graph()->NewNode(store_op, mem_start, index, val, Effect(),
bounds_check.if_true);
- Node* effect_phi = bounds_check.EffectPhi(store, *effect_);
- *effect_ = effect_phi;
- *control_ = bounds_check.merge;
+ SetEffect(bounds_check.EffectPhi(store, Effect()));
+ SetControl(bounds_check.merge);
return val;
}
@@ -3921,17 +3898,16 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position) {
- // TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], Effect(), Control()); \
break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
@@ -3939,39 +3915,39 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicCompareExchange( \
MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], inputs[2], Effect(), Control()); \
break; \
}
ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
#undef BUILD_ATOMIC_CMP_EXCHG
-#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
+ case wasm::kExpr##Name: { \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
+ node = graph()->NewNode( \
+ mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, Effect(), Control()); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], Effect(), Control()); \
break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
@@ -3979,8 +3955,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
- *effect_ = node;
- return node;
+ return SetEffect(node);
}
#undef ATOMIC_BINOP_LIST
@@ -4067,13 +4042,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, *effect_, control);
- Node* store =
+ target, Effect(), control);
+ SetEffect(
graph()->NewNode(machine->Store(StoreRepresentation(
MachineRepresentation::kFloat64, kNoWriteBarrier)),
heap_number, BuildHeapNumberValueIndexConstant(),
- value, heap_number, control);
- *effect_ = store;
+ value, heap_number, control));
return heap_number;
}
@@ -4088,9 +4062,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildLoadHeapNumberValue(Node* value) {
- return *effect_ = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Float64()), value,
- BuildHeapNumberValueIndexConstant(), *effect_, *control_);
+ return SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Float64()), value,
+ BuildHeapNumberValueIndexConstant(), Effect(), Control()));
}
Node* BuildHeapNumberValueIndexConstant() {
@@ -4106,8 +4080,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
DCHECK(SmiValuesAre31Bits());
- Node* effect = *effect_;
- Node* control = *control_;
+ Node* effect = Effect();
+ Node* control = Control();
Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
graph()->start());
@@ -4118,18 +4092,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* if_true = graph()->NewNode(common->IfTrue(), branch);
Node* vtrue = BuildAllocateHeapNumberWithValue(
graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
- Node* etrue = *effect_;
+ Node* etrue = Effect();
Node* if_false = graph()->NewNode(common->IfFalse(), branch);
Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
vfalse = BuildChangeInt32ToIntPtr(vfalse);
- Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
- *effect_ = graph()->NewNode(common->EffectPhi(2), etrue, effect, merge);
- *control_ = merge;
- return phi;
+ Node* merge =
+ SetControl(graph()->NewNode(common->Merge(2), if_true, if_false));
+ SetEffect(graph()->NewNode(common->EffectPhi(2), etrue, effect, merge));
+ return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
}
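
BuildChangeInt32ToTagged relies on the classic Smi-tagging trick for 31-bit Smis: adding the value to itself is the same as shifting left by one, which leaves the tag bit clear, and the overflow flag tells us whether the value fits in a Smi at all; only the overflow path allocates a HeapNumber. A small sketch of that decision, using the GCC/Clang overflow builtin for brevity (the helper name is illustrative):

#include <cstdint>
#include <optional>

// Returns the Smi-tagged value (value << 1) if it fits in a 31-bit Smi,
// otherwise nullopt, in which case the caller boxes it as a HeapNumber.
std::optional<int32_t> TryTagAsSmi(int32_t value) {
  int32_t tagged;
  if (__builtin_add_overflow(value, value, &tagged)) return std::nullopt;
  return tagged;
}
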
Node* BuildChangeFloat64ToTagged(Node* value) {
@@ -4147,8 +4120,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// For potential Smi values, depending on whether Smis are 31 or 32 bit, we
// still need to check whether the value fits in a Smi.
- Node* effect = *effect_;
- Node* control = *control_;
+ Node* effect = Effect();
+ Node* control = Control();
Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
Node* check_i32 = graph()->NewNode(
machine->Float64Equal(), value,
@@ -4210,14 +4183,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Allocate the box for the {value}.
Node* vbox = BuildAllocateHeapNumberWithValue(value, if_box);
- Node* ebox = *effect_;
+ Node* ebox = Effect();
- Node* merge = graph()->NewNode(common->Merge(2), if_smi, if_box);
- value = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vsmi, vbox, merge);
- *effect_ = graph()->NewNode(common->EffectPhi(2), effect, ebox, merge);
- *control_ = merge;
- return value;
+ Node* merge =
+ SetControl(graph()->NewNode(common->Merge(2), if_smi, if_box));
+ SetEffect(graph()->NewNode(common->EffectPhi(2), effect, ebox, merge));
+ return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, vbox, merge);
}
int AddArgumentNodes(Node** args, int pos, int param_count,
@@ -4241,14 +4213,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
: jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
- Node* result =
+ Node* result = SetEffect(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
- node, js_context, *effect_, *control_);
+ node, js_context, Effect(), Control()));
SetSourcePosition(result, 1);
- *effect_ = result;
-
return result;
}
@@ -4266,25 +4236,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* check_heap_object = BuildTestHeapObject(value);
Diamond is_heap_object(graph(), common, check_heap_object,
BranchHint::kFalse);
- is_heap_object.Chain(*control_);
+ is_heap_object.Chain(Control());
- *control_ = is_heap_object.if_true;
- Node* orig_effect = *effect_;
+ SetControl(is_heap_object.if_true);
+ Node* orig_effect = Effect();
- Node* undefined_node = *effect_ =
+ Node* undefined_node =
LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
- Node* effect_tagged = *effect_;
+ Node* effect_tagged = Effect();
Diamond is_undefined(graph(), common, check_undefined, BranchHint::kFalse);
is_undefined.Nest(is_heap_object, true);
- *control_ = is_undefined.if_false;
+ SetControl(is_undefined.if_false);
Node* vheap_number = BuildLoadHeapNumberValue(value);
- Node* effect_undefined = *effect_;
+ Node* effect_undefined = Effect();
- *control_ = is_undefined.merge;
+ SetControl(is_undefined.merge);
Node* vundefined =
mcgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
Node* vtagged = is_undefined.Phi(MachineRepresentation::kFloat64,
@@ -4295,8 +4265,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// If input is Smi: just convert to float64.
Node* vfrom_smi = BuildChangeSmiToFloat64(value);
- *control_ = is_heap_object.merge;
- *effect_ = is_heap_object.EffectPhi(effect_tagged, orig_effect);
+ SetControl(is_heap_object.merge);
+ SetEffect(is_heap_object.EffectPhi(effect_tagged, orig_effect));
return is_heap_object.Phi(MachineRepresentation::kFloat64, vtagged,
vfrom_smi);
}
@@ -4356,63 +4326,69 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return num;
}
- Node* BuildModifyThreadInWasmFlag(bool new_value) {
- // TODO(eholk): generate code to modify the thread-local storage directly,
- // rather than calling the runtime.
- if (!use_trap_handler()) {
- return *control_;
- }
-
- // Using two functions instead of taking the new value as a parameter saves
- // one instruction on each call to set up the parameter.
- ExternalReference ref =
- new_value ? ExternalReference::wasm_set_thread_in_wasm_flag()
- : ExternalReference::wasm_clear_thread_in_wasm_flag();
- MachineSignature sig(0, 0, nullptr);
- return BuildCCall(
- &sig, graph()->NewNode(mcgraph()->common()->ExternalConstant(ref)));
+ void BuildModifyThreadInWasmFlag(bool new_value) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* thread_in_wasm_flag_address_address =
+ graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_thread_in_wasm_flag_address_address(
+ isolate_)));
+ Node* thread_in_wasm_flag_address = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(LoadRepresentation(MachineType::Pointer())),
+ thread_in_wasm_flag_address_address, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
+ SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ thread_in_wasm_flag_address, mcgraph()->Int32Constant(0),
+ mcgraph()->Int32Constant(new_value ? 1 : 0), Effect(), Control()));
}
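
Instead of calling a runtime function, the wrapper now updates the thread-in-wasm flag inline: one load resolves the address-of-the-address external reference to the flag's location, and a plain word32 store writes 0 or 1. The equivalent C++, with illustrative parameter names:

#include <cstdint>

// Double indirection: the external reference gives the address where the
// flag's address is stored; dereference once, then store the new value.
void ModifyThreadInWasmFlag(uint32_t** flag_address_address, bool new_value) {
  uint32_t* flag_address = *flag_address_address;  // first load
  *flag_address = new_value ? 1 : 0;               // then the store
}
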
Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
- Node* shared = *effect_ = graph()->NewNode(
+ Node* shared = SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), closure,
jsgraph()->Int32Constant(JSFunction::kSharedFunctionInfoOffset -
kHeapObjectTag),
- *effect_, *control_);
- Node* function_data = *effect_ = graph()->NewNode(
+ Effect(), Control()));
+ return SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), shared,
jsgraph()->Int32Constant(SharedFunctionInfo::kFunctionDataOffset -
kHeapObjectTag),
- *effect_, *control_);
- return function_data;
+ Effect(), Control()));
}
Node* BuildLoadInstanceFromExportedFunctionData(Node* function_data) {
- Node* instance = *effect_ = graph()->NewNode(
+ return SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
jsgraph()->Int32Constant(WasmExportedFunctionData::kInstanceOffset -
kHeapObjectTag),
- *effect_, *control_);
- return instance;
+ Effect(), Control()));
}
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data) {
- Node* function_index_smi = *effect_ = graph()->NewNode(
+ Node* function_index_smi = SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
jsgraph()->Int32Constant(
WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag),
- *effect_, *control_);
+ Effect(), Control()));
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
+ Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data) {
+ Node* jump_table_offset_smi = SetEffect(graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
+ jsgraph()->Int32Constant(
+ WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag),
+ Effect(), Control()));
+ Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
+ return jump_table_offset;
+ }
+
void BuildJSToWasmWrapper(bool is_import) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
- Node* start = Start(wasm_count + 5);
- *control_ = start;
- *effect_ = start;
+ SetEffect(SetControl(Start(wasm_count + 5)));
// Create the js_closure and js_context parameters.
Node* js_closure =
@@ -4456,28 +4432,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Set the ThreadInWasm flag before we do the actual call.
BuildModifyThreadInWasmFlag(true);
- // Load function index from {WasmExportedFunctionData}.
- Node* function_index =
- BuildLoadFunctionIndexFromExportedFunctionData(function_data);
-
if (is_import) {
// Call to an imported function.
+ // Load function index from {WasmExportedFunctionData}.
+ Node* function_index =
+ BuildLoadFunctionIndexFromExportedFunctionData(function_data);
BuildImportWasmCall(sig_, args, &rets, wasm::kNoCodePosition,
function_index);
} else {
// Call to a wasm function defined in this module.
- // The call target is the jump table slot for that function. This is
- // {jump_table + (func_index - num_imports) * kJumpTableSlotSize}
- // == {jump_table_adjusted + func_index * kJumpTableSlotSize}.
- Node* jump_table_adjusted =
- LOAD_INSTANCE_FIELD(JumpTableAdjustedStart, MachineType::Pointer());
- Node* jump_table_offset = graph()->NewNode(
- mcgraph()->machine()->IntMul(), Uint32ToUintptr(function_index),
- mcgraph()->IntPtrConstant(
- wasm::JumpTableAssembler::kJumpTableSlotSize));
- Node* jump_table_slot =
- graph()->NewNode(mcgraph()->machine()->IntAdd(), jump_table_adjusted,
- jump_table_offset);
+ // The call target is the jump table slot for that function.
+ Node* jump_table_start =
+ LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
+ Node* jump_table_offset =
+ BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
+ Node* jump_table_slot = graph()->NewNode(
+ mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
args[0] = jump_table_slot;
BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr,
@@ -4498,9 +4468,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- Node* start = Start(wasm_count + 3);
- *effect_ = start;
- *control_ = start;
+ SetEffect(SetControl(Start(wasm_count + 3)));
// Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
@@ -4532,11 +4500,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
FieldAccess field_access = AccessBuilder::ForJSFunctionContext();
- Node* function_context = graph()->NewNode(
+ Node* function_context = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::TaggedPointer()),
callable_node,
mcgraph()->Int32Constant(field_access.offset - field_access.tag()),
- *effect_, *control_);
+ Effect(), Control()));
if (!IsClassConstructor(function->shared()->kind())) {
if (function->shared()->internal_formal_parameter_count() ==
@@ -4562,8 +4530,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = function_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
pos, args);
@@ -4594,8 +4562,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(args, pos, wasm_count, sig_);
args[pos++] = function_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
pos, args);
}
@@ -4625,14 +4593,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// the target is a native function, or if the target is a callable
// JSObject, which can only be constructed by the runtime.
args[pos++] = native_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
args);
}
- *effect_ = call;
+ SetEffect(call);
SetSourcePosition(call, 0);
// Convert the return value back.
@@ -4650,9 +4618,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- Node* start = Start(param_count + 3);
- *effect_ = start;
- *control_ = start;
+ SetEffect(SetControl(Start(param_count + 3)));
// Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
@@ -4685,9 +4651,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < param_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
// Start from the parameter with index 1 to drop the instance_node.
- *effect_ = graph()->NewNode(GetSafeStoreOperator(offset, type),
- arg_buffer, Int32Constant(offset),
- Param(i + 1), *effect_, *control_);
+ SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
+ Int32Constant(offset), Param(i + 1), Effect(),
+ Control()));
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
DCHECK_EQ(args_size_bytes, offset);
@@ -4709,9 +4675,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK_EQ(1, sig_->return_count());
MachineType load_rep =
wasm::ValueTypes::MachineTypeFor(sig_->GetReturn());
- Node* val =
+ Node* val = SetEffect(
graph()->NewNode(mcgraph()->machine()->Load(load_rep), arg_buffer,
- Int32Constant(0), *effect_, *control_);
+ Int32Constant(0), Effect(), Control()));
Return(val);
}
@@ -4720,9 +4686,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildCWasmEntry() {
// Build the start and the JS parameter nodes.
- Node* start = Start(CWasmEntryParameters::kNumParameters + 5);
- *control_ = start;
- *effect_ = start;
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
@@ -4730,7 +4694,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* code_obj = graph()->NewNode(
machine->Load(MachineType::Pointer()), foreign_code_obj,
Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- *effect_, *control_);
+ Effect(), Control());
Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
@@ -4744,24 +4708,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
- Node* arg_load =
+ Node* arg_load = SetEffect(
graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
- Int32Constant(offset), *effect_, *control_);
- *effect_ = arg_load;
+ Int32Constant(offset), Effect(), Control()));
args[pos++] = arg_load;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
DCHECK_EQ(arg_count, pos);
// Call the wasm code.
auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_);
- Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- arg_count, args);
- *effect_ = call;
+ Node* call = SetEffect(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), arg_count, args));
// Store the return value.
DCHECK_GE(1, sig_->return_count());
@@ -4769,10 +4731,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
StoreRepresentation store_rep(
wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()),
kNoWriteBarrier);
- Node* store =
- graph()->NewNode(mcgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(0), call, *effect_, *control_);
- *effect_ = store;
+ SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
+ arg_buffer, Int32Constant(0), call, Effect(),
+ Control()));
}
Return(jsgraph()->SmiConstant(0));
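
The C entry stub and the wasm code exchange values through a flat argument buffer: parameters are read from successive offsets, each advanced by the element size of its type, and the single return value is written back at offset 0. A rough sketch of the buffer protocol from the packing side, with made-up types and names:

#include <cstdint>
#include <cstring>

// Pack two arguments into the flat buffer at increasing offsets, the way a
// caller prepares arg_buffer before entering wasm code.
void PackArgs(uint8_t* arg_buffer, int32_t a, double b) {
  size_t offset = 0;
  std::memcpy(arg_buffer + offset, &a, sizeof(a));
  offset += sizeof(a);
  std::memcpy(arg_buffer + offset, &b, sizeof(b));
}

// After the call returns, the return value (if any) is read back from
// offset 0 of the same buffer.
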
@@ -4804,6 +4765,8 @@ MaybeHandle<Code> CompileJSToWasmWrapper(
Isolate* isolate, const wasm::NativeModule* native_module,
wasm::FunctionSig* sig, bool is_import,
wasm::UseTrapHandler use_trap_handler) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "CompileJSToWasmWrapper");
const wasm::WasmModule* module = native_module->module();
//----------------------------------------------------------------------------
@@ -4877,6 +4840,8 @@ MaybeHandle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
uint32_t index, wasm::ModuleOrigin origin,
wasm::UseTrapHandler use_trap_handler) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "CompileWasmToJSWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -5082,14 +5047,14 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
wasm::WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit),
- wasm_compilation_data_(wasm_unit->env_->runtime_exception_support) {}
+ : wasm_unit_(wasm_unit) {}
// Clears unique_ptrs, but (part of) the type is forward declared in the header.
TurbofanWasmCompilationUnit::~TurbofanWasmCompilationUnit() = default;
SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
- double* decode_ms, MachineGraph* mcgraph, NodeOriginTable* node_origins) {
+ wasm::WasmFeatures* detected, double* decode_ms, MachineGraph* mcgraph,
+ NodeOriginTable* node_origins) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -5100,9 +5065,10 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
WasmGraphBuilder builder(wasm_unit_->env_, mcgraph->zone(), mcgraph,
wasm_unit_->func_body_.sig, source_position_table);
- graph_construction_result_ =
- wasm::BuildTFGraph(wasm_unit_->wasm_engine_->allocator(), &builder,
- wasm_unit_->func_body_, node_origins);
+ graph_construction_result_ = wasm::BuildTFGraph(
+ wasm_unit_->wasm_engine_->allocator(),
+ wasm_unit_->native_module_->enabled_features(), wasm_unit_->env_->module,
+ &builder, detected, wasm_unit_->func_body_, node_origins);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -5114,7 +5080,7 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
builder.LowerInt64();
if (builder.has_simd() &&
- (!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->lower_simd_)) {
+ (!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->env_->lower_simd)) {
SimdScalarLowering(
mcgraph,
CreateMachineSignature(mcgraph->zone(), wasm_unit_->func_body_.sig))
@@ -5155,7 +5121,8 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
} // namespace
-void TurbofanWasmCompilationUnit::ExecuteCompilation() {
+void TurbofanWasmCompilationUnit::ExecuteCompilation(
+ wasm::WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteTurbofanCompilation");
double decode_ms = 0;
@@ -5178,13 +5145,16 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
GetDebugName(&compilation_zone, wasm_unit_->func_name_,
wasm_unit_->func_index_),
&compilation_zone, Code::WASM_FUNCTION);
+ if (wasm_unit_->env_->runtime_exception_support) {
+ info.SetWasmRuntimeExceptionSupport();
+ }
NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
? new (&graph_zone)
NodeOriginTable(mcgraph->graph())
: nullptr;
SourcePositionTable* source_positions =
- BuildGraphForWasmFunction(&decode_ms, mcgraph, node_origins);
+ BuildGraphForWasmFunction(detected, &decode_ms, mcgraph, node_origins);
if (graph_construction_result_.failed()) {
ok_ = false;
@@ -5212,8 +5182,7 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
std::unique_ptr<OptimizedCompilationJob> job(
Pipeline::NewWasmCompilationJob(
&info, wasm_unit_->wasm_engine_, mcgraph, call_descriptor,
- source_positions, node_origins, &wasm_compilation_data_,
- wasm_unit_->func_body_,
+ source_positions, node_origins, wasm_unit_->func_body_,
const_cast<wasm::WasmModule*>(wasm_unit_->env_->module),
wasm_unit_->native_module_, wasm_unit_->func_index_,
wasm_unit_->env_->module->origin));
@@ -5233,6 +5202,7 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
}
if (ok_) wasm_code_ = info.wasm_code();
}
+ if (ok_) wasm_unit_->native_module()->PublishCode(wasm_code_);
}
wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
@@ -5255,8 +5225,6 @@ wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
return nullptr;
}
-
- wasm_unit_->native_module()->PublishCode(wasm_code_);
return wasm_code_;
}
@@ -5269,32 +5237,20 @@ class LinkageLocationAllocator {
const DoubleRegister (&fp)[kNumFpRegs])
: allocator_(wasm::LinkageAllocator(gp, fp)) {}
- LinkageLocation Next(MachineRepresentation type) {
- MachineType mach_type = MachineType::TypeForRepresentation(type);
- if (type == MachineRepresentation::kFloat32 ||
- type == MachineRepresentation::kFloat64) {
- if (allocator_.has_more_fp_regs()) {
- DoubleRegister reg = allocator_.NextFpReg();
-#if V8_TARGET_ARCH_ARM
- // Allocate floats using a double register, but modify the code to
- // reflect how ARM FP registers alias.
- // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type == MachineRepresentation::kFloat32) {
- int float_reg_code = reg.code() * 2;
- DCHECK_GT(RegisterConfiguration::kMaxFPRegisters, float_reg_code);
- return LinkageLocation::ForRegister(
- DoubleRegister::from_code(float_reg_code).code(), mach_type);
- }
-#endif
- return LinkageLocation::ForRegister(reg.code(), mach_type);
+ LinkageLocation Next(MachineRepresentation rep) {
+ MachineType type = MachineType::TypeForRepresentation(rep);
+ if (IsFloatingPoint(rep)) {
+ if (allocator_.CanAllocateFP(rep)) {
+ int reg_code = allocator_.NextFpReg(rep);
+ return LinkageLocation::ForRegister(reg_code, type);
}
- } else if (allocator_.has_more_gp_regs()) {
- return LinkageLocation::ForRegister(allocator_.NextGpReg().code(),
- mach_type);
+ } else if (allocator_.CanAllocateGP()) {
+ int reg_code = allocator_.NextGpReg();
+ return LinkageLocation::ForRegister(reg_code, type);
}
// Cannot use register; use stack slot.
- int index = -1 - allocator_.NextStackSlot(type);
- return LinkageLocation::ForCallerFrameSlot(index, mach_type);
+ int index = -1 - allocator_.NextStackSlot(rep);
+ return LinkageLocation::ForCallerFrameSlot(index, type);
}
void SetStackOffset(int offset) { allocator_.SetStackOffset(offset); }
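
The rewritten Next delegates all register availability questions to wasm::LinkageAllocator (CanAllocateFP/CanAllocateGP, NextFpReg/NextGpReg) and keeps only the fallback to caller frame slots, which are encoded as negative indices. A toy version of the same register-then-stack policy, with invented counts and a simplified location type:

#include <cstdint>

// A location is either a register code or a caller frame slot; V8 encodes
// frame slots as negative indices (-1 - slot), mirrored here.
struct Location {
  bool is_register;
  int index;
};

class ToyLocationAllocator {
 public:
  Location Next() {
    if (next_reg_ < kNumRegs) return {true, next_reg_++};
    return {false, -1 - next_stack_slot_++};
  }

 private:
  static constexpr int kNumRegs = 6;  // invented; real counts are per-target
  int next_reg_ = 0;
  int next_stack_slot_ = 0;
};
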
@@ -5331,7 +5287,11 @@ CallDescriptor* GetWasmCallDescriptor(
// Add return location(s).
LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
wasm::kFpReturnRegisters);
- rets.SetStackOffset(params.NumStackSlots());
+
+ int parameter_slots = params.NumStackSlots();
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
+
+ rets.SetStackOffset(parameter_slots);
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
@@ -5352,19 +5312,19 @@ CallDescriptor* GetWasmCallDescriptor(
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.NumStackSlots(), // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call", // debug name
- 0, // allocatable registers
- rets.NumStackSlots() - params.NumStackSlots()); // stack_return_count
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ parameter_slots, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ flags, // flags
+ "wasm-call", // debug name
+ 0, // allocatable registers
+ rets.NumStackSlots() - parameter_slots); // stack_return_count
}
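
The functional change in the descriptor itself is that parameter_slots is rounded up to an even count when kPadArguments is set (e.g. targets whose ABI keeps the stack pointer 16-byte aligned), so the return slots start at an aligned offset and stack_return_count is computed against the padded value. The rounding is the usual power-of-two RoundUp:

// Round 'n' up to a multiple of 'alignment', which must be a power of two.
constexpr int RoundUp(int n, int alignment) {
  return (n + alignment - 1) & ~(alignment - 1);
}

static_assert(RoundUp(3, 2) == 4, "odd parameter slot counts get a pad slot");
static_assert(RoundUp(4, 2) == 4, "even counts are unchanged");
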
namespace {
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 0f6ee0304e..775c817242 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -10,7 +10,6 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/runtime/runtime.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-module.h"
@@ -41,57 +40,27 @@ struct DecodeStruct;
typedef compiler::Node TFNode;
typedef compiler::MachineGraph TFGraph;
class WasmCode;
+struct WasmFeatures;
} // namespace wasm
namespace compiler {
-// Information about Wasm compilation that needs to be plumbed through the
-// different layers of the compiler.
-class WasmCompilationData {
- public:
- explicit WasmCompilationData(
- wasm::RuntimeExceptionSupport runtime_exception_support)
- : runtime_exception_support_(runtime_exception_support) {}
-
- void AddProtectedInstruction(uint32_t instr_offset, uint32_t landing_offset) {
- protected_instructions_.push_back({instr_offset, landing_offset});
- }
-
- OwnedVector<trap_handler::ProtectedInstructionData>
- GetProtectedInstructions() {
- return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
- protected_instructions_);
- }
-
- wasm::RuntimeExceptionSupport runtime_exception_support() const {
- return runtime_exception_support_;
- }
-
- private:
- std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
-
- // See ModuleEnv::runtime_exception_support_.
- wasm::RuntimeExceptionSupport runtime_exception_support_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmCompilationData);
-};
-
class TurbofanWasmCompilationUnit {
public:
explicit TurbofanWasmCompilationUnit(wasm::WasmCompilationUnit* wasm_unit);
~TurbofanWasmCompilationUnit();
- SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms,
+ SourcePositionTable* BuildGraphForWasmFunction(wasm::WasmFeatures* detected,
+ double* decode_ms,
MachineGraph* mcgraph,
NodeOriginTable* node_origins);
- void ExecuteCompilation();
+ void ExecuteCompilation(wasm::WasmFeatures* detected);
wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
private:
wasm::WasmCompilationUnit* const wasm_unit_;
- WasmCompilationData wasm_compilation_data_;
bool ok_ = true;
wasm::WasmCode* wasm_code_ = nullptr;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
@@ -270,8 +239,22 @@ class WasmGraphBuilder {
this->instance_node_ = instance_node;
}
- Node* Control() { return *control_; }
- Node* Effect() { return *effect_; }
+ Node* Control() {
+ DCHECK_NOT_NULL(*control_);
+ return *control_;
+ }
+ Node* Effect() {
+ DCHECK_NOT_NULL(*effect_);
+ return *effect_;
+ }
+ Node* SetControl(Node* node) {
+ *control_ = node;
+ return node;
+ }
+ Node* SetEffect(Node* node) {
+ *effect_ = node;
+ return node;
+ }
void set_control_ptr(Node** control) { this->control_ = control; }
@@ -369,6 +352,8 @@ class WasmGraphBuilder {
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
+ uint32_t offset, wasm::WasmCodePosition);
Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 548e3eb416..2ccb56907d 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -12,6 +12,8 @@
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -1211,6 +1213,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Popcntl(i.OutputRegister(), i.InputOperand(0));
}
break;
+ case kX64Bswap:
+ __ bswapq(i.OutputRegister());
+ break;
+ case kX64Bswap32:
+ __ bswapl(i.OutputRegister());
+ break;
case kSSEFloat32Cmp:
ASSEMBLE_SSE_BINOP(Ucomiss);
break;
@@ -3226,7 +3234,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
-void CodeGenerator::FinishCode() {}
+void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 88474b2494..6a9e313f4e 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -56,6 +56,8 @@ namespace compiler {
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index c3c0d3a2a5..b1f380badf 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -54,6 +54,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Tzcnt32:
case kX64Popcnt:
case kX64Popcnt32:
+ case kX64Bswap:
+ case kX64Bswap32:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index b3dfb91991..b5d7fa6d55 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -197,6 +197,17 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
+ InstructionOperand GetEffectiveIndexOperand(Node* index,
+ AddressingMode* mode) {
+ if (CanBeImmediate(index)) {
+ *mode = kMode_MRI;
+ return UseImmediate(index);
+ } else {
+ *mode = kMode_MR1;
+ return UseUniqueRegister(index);
+ }
+ }
+
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
@@ -329,17 +340,10 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(store_rep.representation()));
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ g.UseUniqueRegister(value)};
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -356,11 +360,10 @@ void InstructionSelector::VisitStore(Node* node) {
break;
}
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
} else {
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[4];
@@ -791,9 +794,15 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
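
Word32/Word64ReverseBytes now lower to the x64 bswap instructions; bswap operates in place on its register, which is why the output is constrained with DefineSameAsFirst. For reference, the portable equivalent of the 32-bit variant:

#include <cstdint>

// What a single 'bswapl' does to a 32-bit value.
uint32_t ReverseBytes32(uint32_t x) {
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
}
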
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -1827,16 +1836,9 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
@@ -1853,17 +1855,10 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseFixed(old_value, rax),
- g.UseUniqueRegister(new_value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
@@ -1877,16 +1872,9 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
@@ -2320,7 +2308,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2340,7 +2328,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicExchangeUint8;
@@ -2358,7 +2346,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2378,7 +2366,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicCompareExchangeUint8;
@@ -2398,7 +2386,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2434,7 +2422,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode word64_op) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = uint8_op;