Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS  |  3
-rw-r--r--  deps/v8/src/compiler/access-builder.cc  |  23
-rw-r--r--  deps/v8/src/compiler/access-builder.h  |  7
-rw-r--r--  deps/v8/src/compiler/access-info.cc  |  29
-rw-r--r--  deps/v8/src/compiler/access-info.h  |  4
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc  |  81
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h  |  1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc  |  1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc  |  68
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc  |  204
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h  |  9
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc  |  10
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc  |  220
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h  |  8
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc  |  2
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h  |  2
-rw-r--r--  deps/v8/src/compiler/backend/frame-elider.cc  |  2
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc  |  335
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h  |  20
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc  |  20
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc  |  156
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc  |  2
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector-impl.h  |  25
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc  |  44
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h  |  15
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc  |  15
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h  |  35
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.cc  |  12
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.h  |  13
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc  |  70
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h  |  1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc  |  1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc  |  6
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc  |  70
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h  |  1
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc  |  1
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc  |  6
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc  |  18
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc  |  10
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.cc  |  12
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.h  |  1
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc  |  344
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h  |  55
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc  |  5
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc  |  87
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc  |  343
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h  |  7
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc  |  7
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc  |  136
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc  |  52
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc  |  6
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc  |  40
-rw-r--r--  deps/v8/src/compiler/code-assembler.h  |  390
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc  |  33
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h  |  11
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.cc  |  39
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.h  |  5
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc  |  184
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc  |  5
-rw-r--r--  deps/v8/src/compiler/frame-states.cc  |  18
-rw-r--r--  deps/v8/src/compiler/functional-list.h  |  2
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc  |  10
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h  |  2
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc  |  9
-rw-r--r--  deps/v8/src/compiler/heap-refs.h  |  63
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h  |  2
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc  |  31
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc  |  2
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc  |  66
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h  |  1
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc  |  21
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc  |  502
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h  |  32
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc  |  176
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc  |  56
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc  |  23
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc  |  288
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h  |  15
-rw-r--r--  deps/v8/src/compiler/js-operator.cc  |  76
-rw-r--r--  deps/v8/src/compiler/js-operator.h  |  72
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc  |  26
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h  |  7
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc  |  14
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc  |  13
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc  |  4
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc  |  10
-rw-r--r--  deps/v8/src/compiler/machine-operator.h  |  17
-rw-r--r--  deps/v8/src/compiler/memory-lowering.cc  |  551
-rw-r--r--  deps/v8/src/compiler/memory-lowering.h  |  136
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc  |  546
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.h  |  101
-rw-r--r--  deps/v8/src/compiler/node-matchers.h  |  14
-rw-r--r--  deps/v8/src/compiler/node.h  |  2
-rw-r--r--  deps/v8/src/compiler/opcodes.h  |  16
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc  |  1
-rw-r--r--  deps/v8/src/compiler/pipeline.cc  |  347
-rw-r--r--  deps/v8/src/compiler/pipeline.h  |  2
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h  |  31
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc  |  5
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h  |  14
-rw-r--r--  deps/v8/src/compiler/representation-change.cc  |  24
-rw-r--r--  deps/v8/src/compiler/scheduler.cc  |  2
-rw-r--r--  deps/v8/src/compiler/select-lowering.cc  |  38
-rw-r--r--  deps/v8/src/compiler/select-lowering.h  |  18
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc  |  850
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h  |  7
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc  |  64
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc  |  42
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc  |  17
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc  |  29
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h  |  6
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc  |  365
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.h  |  176
-rw-r--r--  deps/v8/src/compiler/typer.cc  |  12
-rw-r--r--  deps/v8/src/compiler/types.cc  |  40
-rw-r--r--  deps/v8/src/compiler/verifier.cc  |  10
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc  |  386
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h  |  80
-rw-r--r--  deps/v8/src/compiler/zone-stats.h  |  2
119 files changed, 5466 insertions, 3338 deletions
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 50e2af7129..204c0ba115 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -8,11 +8,12 @@ tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
mslekova@chromium.org
+jgruber@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
per-file wasm-*=binji@chromium.org
-per-file wasm-*=clemensh@chromium.org
+per-file wasm-*=clemensb@chromium.org
per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 7a72be8028..e6c5568af0 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -23,10 +23,9 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalTaggedValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+FieldAccess AccessBuilder::ForExternalIntPtr() {
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), Type::Any(), MachineType::IntPtr(),
kNoWriteBarrier};
return access;
}
@@ -109,7 +108,6 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
int index) {
@@ -185,7 +183,6 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
FieldAccess access = {
@@ -296,7 +293,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
FieldAccess access = {
@@ -478,7 +474,6 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
FieldAccess access = {
@@ -489,7 +484,6 @@ FieldAccess AccessBuilder::ForJSIteratorResultDone() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
FieldAccess access = {
@@ -540,7 +534,6 @@ FieldAccess AccessBuilder::ForJSRegExpSource() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
FieldAccess access = {kTaggedBase,
@@ -600,7 +593,6 @@ FieldAccess AccessBuilder::ForMapBitField3() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
FieldAccess access = {
@@ -611,7 +603,6 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {
@@ -621,7 +612,6 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForMapPrototype() {
FieldAccess access = {
@@ -810,7 +800,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorString() {
// static
FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
FieldAccess access = {kTaggedBase,
- JSStringIterator::kNextIndexOffset,
+ JSStringIterator::kIndexOffset,
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kStringLengthType,
@@ -829,7 +819,6 @@ FieldAccess AccessBuilder::ForArgumentsLength() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess access = {
@@ -840,7 +829,6 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
return access;
}
-
// static
FieldAccess AccessBuilder::ForFixedArraySlot(
size_t index, WriteBarrierKind write_barrier_kind) {
@@ -852,7 +840,6 @@ FieldAccess AccessBuilder::ForFixedArraySlot(
return access;
}
-
// static
FieldAccess AccessBuilder::ForCellValue() {
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
@@ -937,7 +924,7 @@ ElementAccess AccessBuilder::ForStackArgument() {
ElementAccess access = {
kUntaggedBase,
CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize,
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::Pointer(),
WriteBarrierKind::kNoWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 231e75f819..4aa69e3726 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -24,11 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to a tagged field identified by an external reference.
- static FieldAccess ForExternalTaggedValue();
-
- // Provides access to an uint8 field identified by an external reference.
- static FieldAccess ForExternalUint8Value();
+ // Provides access to an IntPtr field identified by an external reference.
+ static FieldAccess ForExternalIntPtr();
// ===========================================================================
// Access to heap object fields and elements (based on tagged pointer).
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 269ef90375..dcdd1de831 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -31,9 +31,9 @@ bool CanInlinePropertyAccess(Handle<Map> map) {
// We can inline property access to prototypes of all primitives, except
// the special Oddball ones that have no wrapper counterparts (i.e. Null,
// Undefined and TheHole).
- STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE);
+ STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
if (map->IsBooleanMap()) return true;
- if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true;
+ if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
return map->IsJSObjectMap() && !map->is_dictionary_map() &&
!map->has_named_interceptor() &&
// TODO(verwaest): Whitelist contexts to which we have access.
@@ -323,8 +323,8 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
- int descriptor, AccessMode access_mode) const {
- DCHECK_NE(descriptor, DescriptorArray::kNotFound);
+ InternalIndex descriptor, AccessMode access_mode) const {
+ DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
@@ -351,6 +351,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
+ if (!FLAG_unbox_double_fields) {
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ map_ref, descriptor));
+ }
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
@@ -408,9 +413,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int descriptor,
+ MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const {
- DCHECK_NE(descriptor, DescriptorArray::kNotFound);
+ DCHECK(descriptor.is_found());
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
@@ -497,8 +502,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
while (true) {
// Lookup the named property on the {map}.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int const number = descriptors->Search(*name, *map);
- if (number != DescriptorArray::kNotFound) {
+ InternalIndex const number = descriptors->Search(*name, *map);
+ if (number.is_found()) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral) {
@@ -762,7 +767,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
}
Handle<Map> transition_map(transition, isolate());
- int const number = transition_map->LastAdded();
+ InternalIndex const number = transition_map->LastAdded();
PropertyDetails const details =
transition_map->instance_descriptors().GetDetails(number);
// Don't bother optimizing stores to read-only properties.
@@ -789,6 +794,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
+ if (!FLAG_unbox_double_fields) {
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
+ }
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index e2f6e6d453..59101e2cc9 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -204,11 +204,11 @@ class AccessInfoFactory final {
PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map,
Handle<Map> map,
MaybeHandle<JSObject> holder,
- int descriptor,
+ InternalIndex descriptor,
AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int descriptor,
+ MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const;
void MergePropertyAccessInfos(ZoneVector<PropertyAccessInfo> infos,
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 65a569d755..3fe5361083 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -44,7 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- Operand InputImmediate(size_t index) {
+ Operand InputImmediate(size_t index) const {
return ToImmediate(instr_->InputAt(index));
}
@@ -111,7 +111,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return InputOffset(&first_index);
}
- Operand ToImmediate(InstructionOperand* operand) {
+ Operand ToImmediate(InstructionOperand* operand) const {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
@@ -153,9 +153,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
NeonMemOperand NeonInputOperand(size_t first_index) {
const size_t index = first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
- case kMode_Offset_RR:
- return NeonMemOperand(InputRegister(index + 0),
- InputRegister(index + 1));
case kMode_Operand2_R:
return NeonMemOperand(InputRegister(index + 0));
default:
@@ -309,9 +306,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode,
- ArmOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode,
+ ArmOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -320,10 +317,10 @@ void EmitWordLoadPoisoningIfNeeded(
}
}
-void ComputePoisonedAddressForLoad(
- CodeGenerator* codegen, InstructionCode opcode,
- ArmOperandConverter& i, // NOLINT(runtime/references)
- Register address) {
+void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
+ InstructionCode opcode,
+ ArmOperandConverter const& i,
+ Register address) {
DCHECK_EQ(kMemoryAccessPoisoned,
static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
switch (AddressingModeField::decode(opcode)) {
@@ -1798,6 +1795,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kArmF32x4Sqrt: {
+ QwNeonRegister dst = i.OutputSimd128Register();
+ QwNeonRegister src1 = i.InputSimd128Register(0);
+ DCHECK_EQ(dst, q0);
+ DCHECK_EQ(src1, q0);
+#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane)
+ __ vsqrt(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0));
+ __ vsqrt(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1));
+ __ vsqrt(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2));
+ __ vsqrt(S_FROM_Q(dst, 3), S_FROM_Q(src1, 3));
+#undef S_FROM_Q
+ break;
+ }
case kArmF32x4RecipApprox: {
__ vrecpe(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -1919,14 +1929,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
break;
}
case kArmI32x4ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vneg(Neon32, tmp, tmp);
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
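
The masking added in the shift cases above gives wrap-around shift counts: as the new comments state, the count is taken modulo the lane width before being broadcast. A minimal scalar sketch of that semantics for one 32-bit lane (illustration only, not part of the patch):

#include <cstdint>

// Lane-wise left shift with the count wrapped to the lane width, mirroring the
// and_(shift, input, Operand(31)) emitted above for 32-bit lanes; 15 and 7
// play the same role for 16-bit and 8-bit lanes.
uint32_t ShiftLeftLane32(uint32_t lane, uint32_t count) {
  return lane << (count & 31);  // count taken modulo 32, so the shift amount is always valid
}
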
@@ -1998,7 +2014,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon32, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, i.InputRegister(1), Operand(31));
+ __ vdup(Neon32, tmp, shift);
__ vneg(Neon32, tmp, tmp);
__ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2029,7 +2048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8ExtractLane: {
- __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU16,
i.InputInt8(1));
break;
}
@@ -2054,14 +2073,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
break;
}
case kArmI16x8ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vneg(Neon16, tmp, tmp);
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2142,7 +2167,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI16x8ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon16, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, i.InputRegister(1), Operand(15));
+ __ vdup(Neon16, tmp, shift);
__ vneg(Neon16, tmp, tmp);
__ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2186,7 +2214,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16ExtractLane: {
- __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU8,
i.InputInt8(1));
break;
}
@@ -2201,6 +2229,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16Shl: {
QwNeonRegister tmp = i.TempSimd128Register(0);
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
__ vdup(Neon8, tmp, i.InputRegister(1));
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2208,7 +2239,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16ShrS: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon8, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
+ __ vdup(Neon8, tmp, shift);
__ vneg(Neon8, tmp, tmp);
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
@@ -2275,7 +2309,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI8x16ShrU: {
QwNeonRegister tmp = i.TempSimd128Register(0);
- __ vdup(Neon8, tmp, i.InputRegister(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, i.InputRegister(1), Operand(7));
+ __ vdup(Neon8, tmp, shift);
__ vneg(Neon8, tmp, tmp);
__ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
tmp);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 3551e26aea..d398ec0ed6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -135,6 +135,7 @@ namespace compiler {
V(ArmF32x4UConvertI32x4) \
V(ArmF32x4Abs) \
V(ArmF32x4Neg) \
+ V(ArmF32x4Sqrt) \
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 1d7cf61dfe..92be55dcc3 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -115,6 +115,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4UConvertI32x4:
case kArmF32x4Abs:
case kArmF32x4Neg:
+ case kArmF32x4Sqrt:
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index ce74faa4a6..303648051f 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/base/enum-set.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -94,7 +94,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -352,6 +352,26 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
}
}
+// Adds the base and offset into a register, then change the addressing
+// mode of opcode_return to use this register. Certain instructions, e.g.
+// vld1 and vst1, when given two registers, will post-increment the offset, i.e.
+// perform the operation at base, then add offset to base. What we intend is to
+// access at (base+offset).
+void EmitAddBeforeS128LoadStore(InstructionSelector* selector,
+ InstructionCode* opcode_return,
+ size_t* input_count_return,
+ InstructionOperand* inputs) {
+ DCHECK(*opcode_return == kArmVld1S128 || *opcode_return == kArmVst1S128);
+ ArmOperandGenerator g(selector);
+ InstructionOperand addr = g.TempRegister();
+ InstructionCode op = kArmAdd;
+ op |= AddressingModeField::encode(kMode_Operand2_R);
+ selector->Emit(op, 1, &addr, 2, inputs);
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R);
+ *input_count_return -= 1;
+ inputs[0] = addr;
+}
+
void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand* output, Node* base, Node* index) {
ArmOperandGenerator g(selector);
@@ -368,7 +388,11 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
input_count = 3;
} else {
inputs[1] = g.UseRegister(index);
- opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ if (opcode == kArmVld1S128) {
+ EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[0]);
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
}
selector->Emit(opcode, 1, output, input_count, inputs);
}
@@ -386,7 +410,12 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ if (opcode == kArmVst1S128) {
+ // Inputs are value, base, index, only care about base and index.
+ EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
}
selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
@@ -596,8 +625,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
return;
}
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128: {
+ case MachineRepresentation::kFloat64: {
// Compute the address of the least-significant byte of the FP value.
// We assume that the base node is unlikely to be an encodable immediate
// or the result of a shift operation, so only consider the addressing
@@ -623,13 +651,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
if (CpuFeatures::IsSupported(NEON)) {
// With NEON we can load directly from the calculated address.
- InstructionCode op = load_rep == MachineRepresentation::kFloat64
- ? kArmVld1F64
- : kArmVld1S128;
+ InstructionCode op = kArmVld1F64;
op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, g.DefineAsRegister(node), addr);
} else {
- DCHECK_NE(MachineRepresentation::kSimd128, load_rep);
// Load both halves and move to an FP register.
InstructionOperand fp_lo = g.TempRegister();
InstructionOperand fp_hi = g.TempRegister();
@@ -670,8 +695,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
EmitStore(this, kArmStr, input_count, inputs, index);
return;
}
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128: {
+ case MachineRepresentation::kFloat64: {
if (CpuFeatures::IsSupported(NEON)) {
InstructionOperand address = g.TempRegister();
{
@@ -697,13 +721,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = address;
- InstructionCode op = store_rep == MachineRepresentation::kFloat64
- ? kArmVst1F64
- : kArmVst1S128;
+ InstructionCode op = kArmVst1F64;
op |= AddressingModeField::encode(kMode_Operand2_R);
Emit(op, 0, nullptr, input_count, inputs);
} else {
- DCHECK_NE(MachineRepresentation::kSimd128, store_rep);
// Store a 64-bit floating point value using two 32-bit integer stores.
// Computing the store address here would require three live temporary
// registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
@@ -942,7 +963,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+ << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {
@@ -1119,6 +1141,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
VisitRR(this, kArmRev, node);
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
@@ -2513,6 +2539,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+void InstructionSelector::VisitF32x4Sqrt(Node* node) {
+ ArmOperandGenerator g(this);
+ // Use fixed registers in the lower 8 Q-registers so we can directly access
+ // mapped registers S0-S31.
+ Emit(kArmF32x4Sqrt, g.DefineAsFixed(node, q0),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
void InstructionSelector::VisitF32x4Div(Node* node) {
ArmOperandGenerator g(this);
// Use fixed registers in the lower 8 Q-registers so we can directly access
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 66ca7f6cf0..6f65c905dd 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -376,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -389,6 +389,36 @@ void EmitWordLoadPoisoningIfNeeded(
}
}
+void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
+ Arm64OperandConverter* i, VRegister output_reg) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ AddressingMode address_mode = AddressingModeField::decode(opcode);
+ if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
+ UseScratchRegisterScope temps(codegen->tasm());
+ Register address = temps.AcquireX();
+ switch (address_mode) {
+ case kMode_MRI: // Fall through.
+ case kMode_MRR:
+ codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
+ break;
+ case kMode_Operand2_R_LSL_I:
+ codegen->tasm()->Add(address, i->InputRegister(0),
+ i->InputOperand2_64(1));
+ break;
+ default:
+ // Note: we don't need poisoning for kMode_Root loads as those loads
+ // target a fixed offset from root register which is set once when
+ // initializing the vm.
+ UNREACHABLE();
+ }
+ codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
+ codegen->tasm()->Ldr(output_reg, MemOperand(address));
+ } else {
+ codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
+ }
+}
+
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
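
The new EmitMaybePoisonedFPLoad helper folds the address computation into a scratch register, masks it with the speculation poison register, and only then loads. A rough conceptual model of that masking (assumed behaviour of the poison value, not V8 API):

#include <cstdint>

// Sketch only: |poison| is assumed to be all-ones on the correctly predicted
// path and zero after a misprediction, so a speculatively executed load is
// steered away from attacker-controlled addresses.
inline double PoisonedLoad(const double* addr, uintptr_t poison) {
  const double* masked = reinterpret_cast<const double*>(
      reinterpret_cast<uintptr_t>(addr) & poison);
  return *masked;  // models: And(address, address, poison); Ldr(output, [address])
}
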
@@ -1198,6 +1228,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
+ case kArm64Sbfx:
+ __ Sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
+ i.InputInt6(2));
+ break;
case kArm64Sbfx32:
__ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
i.InputInt5(2));
@@ -1586,6 +1620,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StrCompressTagged:
+ __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
+ break;
case kArm64DecompressSigned: {
__ DecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0));
break;
@@ -1599,13 +1636,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64LdrS:
- __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
+ EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
break;
case kArm64StrS:
__ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
- __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+ EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
break;
case kArm64StrD:
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
@@ -1616,9 +1653,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kArm64StrCompressTagged:
- __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
- break;
case kArm64DmbIsh:
__ Dmb(InnerShareable, BarrierAll);
break;
@@ -1794,6 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
+ SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
@@ -1818,6 +1853,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2D());
break;
}
+ case kArm64F64x2Qfma: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmla(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+ i.InputSimd128Register(2).V2D());
+ break;
+ }
+ case kArm64F64x2Qfms: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmls(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+ i.InputSimd128Register(2).V2D());
+ break;
+ }
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -1840,6 +1887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
@@ -1867,6 +1915,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V4S());
break;
}
+ case kArm64F32x4Qfma: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmla(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(2).V4S());
+ break;
+ }
+ case kArm64F32x4Qfms: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Fmls(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(2).V4S());
+ break;
+ }
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -1888,14 +1948,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
break;
}
case kArm64I64x2ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Neg(tmp.V2D(), tmp.V2D());
__ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
@@ -1903,6 +1969,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ case kArm64I64x2Mul: {
+ UseScratchRegisterScope scope(tasm());
+ VRegister dst = i.OutputSimd128Register();
+ VRegister src1 = i.InputSimd128Register(0);
+ VRegister src2 = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireSameSizeAs(dst);
+ VRegister tmp2 = scope.AcquireSameSizeAs(dst);
+ VRegister tmp3 = i.ToSimd128Register(instr->TempAt(0));
+
+ // This 2x64-bit multiplication is performed with several 32-bit
+ // multiplications.
+
+ // 64-bit numbers x and y, can be represented as:
+ // x = a + 2^32(b)
+ // y = c + 2^32(d)
+
+ // A 64-bit multiplication is:
+ // x * y = ac + 2^32(ad + bc) + 2^64(bd)
+ // note: `2^64(bd)` can be ignored, the value is too large to fit in
+ // 64-bits.
+
+ // This sequence implements a 2x64bit multiply, where the registers
+ // `src1` and `src2` are split up into 32-bit components:
+ // src1 = |d|c|b|a|
+ // src2 = |h|g|f|e|
+ //
+ // src1 * src2 = |cg + 2^32(ch + dg)|ae + 2^32(af + be)|
+
+ // Reverse the 32-bit elements in the 64-bit words.
+ // tmp2 = |g|h|e|f|
+ __ Rev64(tmp2.V4S(), src2.V4S());
+
+ // Calculate the high half components.
+ // tmp2 = |dg|ch|be|af|
+ __ Mul(tmp2.V4S(), tmp2.V4S(), src1.V4S());
+
+ // Extract the low half components of src1.
+ // tmp1 = |c|a|
+ __ Xtn(tmp1.V2S(), src1.V2D());
+
+ // Sum the respective high half components.
+ // tmp2 = |dg+ch|be+af||dg+ch|be+af|
+ __ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S());
+
+ // Extract the low half components of src2.
+ // tmp3 = |g|e|
+ __ Xtn(tmp3.V2S(), src2.V2D());
+
+ // Shift the high half components, into the high half.
+ // dst = |dg+ch << 32|be+af << 32|
+ __ Shll(dst.V2D(), tmp2.V2S(), 32);
+
+ // Multiply the low components together, and accumulate with the high
+ // half.
+ // dst = |dst[1] + cg|dst[0] + ae|
+ __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S());
+
+ break;
+ }
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
case kArm64I64x2Ne: {
VRegister dst = i.OutputSimd128Register().V2D();
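
As a cross-check of the kArm64I64x2Mul sequence above, here is a per-lane scalar model of the decomposition the comment describes (x = a + 2^32*b, y = c + 2^32*d, so x*y mod 2^64 = a*c + 2^32*(a*d + b*c)); illustration only, not part of the patch:

#include <cstdint>

// One 64-bit lane of the SIMD multiply, built from 32-bit halves; the 2^64
// (b*d) term falls outside the lane and is discarded, as in the code above.
uint64_t MulLane64(uint64_t x, uint64_t y) {
  uint64_t a = x & 0xFFFFFFFFu, b = x >> 32;  // low/high halves of x
  uint64_t c = y & 0xFFFFFFFFu, d = y >> 32;  // low/high halves of y
  uint64_t cross = (a * d + b * c) << 32;     // the Mul/Addp/Shll steps
  return a * c + cross;                       // Umlal accumulates the low product
}
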
@@ -1915,7 +2040,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V2D(), i.InputRegister64(1));
+ Register shift = i.TempRegister(1);
+ // Take shift value modulo 64.
+ __ And(shift, i.InputRegister64(1), 63);
+ __ Dup(tmp.V2D(), shift);
__ Neg(tmp.V2D(), tmp.V2D());
__ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
tmp.V2D());
@@ -1947,14 +2075,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
break;
}
case kArm64I32x4ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Neg(tmp.V4S(), tmp.V4S());
__ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
@@ -1981,7 +2115,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
case kArm64I32x4ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V4S(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 32.
+ __ And(shift, i.InputRegister32(1), 31);
+ __ Dup(tmp.V4S(), shift);
__ Neg(tmp.V4S(), tmp.V4S());
__ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
tmp.V4S());
@@ -1996,7 +2133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8ExtractLane: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
i.InputInt8(1));
break;
}
@@ -2014,14 +2151,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
break;
}
case kArm64I16x8ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Neg(tmp.V8H(), tmp.V8H());
__ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
@@ -2070,7 +2213,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64I16x8ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V8H(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 16.
+ __ And(shift, i.InputRegister32(1), 15);
+ __ Dup(tmp.V8H(), shift);
__ Neg(tmp.V8H(), tmp.V8H());
__ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
tmp.V8H());
@@ -2101,7 +2247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I8x16ExtractLane: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
i.InputInt8(1));
break;
}
@@ -2117,14 +2263,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Sshl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
break;
}
case kArm64I8x16ShrS: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Neg(tmp.V16B(), tmp.V16B());
__ Sshl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
@@ -2163,7 +2315,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
VRegister tmp = i.TempSimd128Register(0);
- __ Dup(tmp.V16B(), i.InputRegister32(1));
+ Register shift = i.TempRegister32(1);
+ // Take shift value modulo 8.
+ __ And(shift, i.InputRegister32(1), 7);
+ __ Dup(tmp.V16B(), shift);
__ Neg(tmp.V16B(), tmp.V16B());
__ Ushl(i.OutputSimd128Register().V16B(),
i.InputSimd128Register(0).V16B(), tmp.V16B());
@@ -2277,6 +2432,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).V16B(), i.InputInt4(2));
break;
}
+ case kArm64S8x16Swizzle: {
+ __ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+ break;
+ }
case kArm64S8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V16B(),
src0 = i.InputSimd128Register(0).V16B(),
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b56e402c1..880a3fbf9e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -70,6 +70,7 @@ namespace compiler {
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
@@ -175,6 +176,7 @@ namespace compiler {
V(Arm64F64x2ReplaceLane) \
V(Arm64F64x2Abs) \
V(Arm64F64x2Neg) \
+ V(Arm64F64x2Sqrt) \
V(Arm64F64x2Add) \
V(Arm64F64x2Sub) \
V(Arm64F64x2Mul) \
@@ -185,6 +187,8 @@ namespace compiler {
V(Arm64F64x2Ne) \
V(Arm64F64x2Lt) \
V(Arm64F64x2Le) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -192,6 +196,7 @@ namespace compiler {
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4Abs) \
V(Arm64F32x4Neg) \
+ V(Arm64F32x4Sqrt) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Add) \
@@ -205,6 +210,8 @@ namespace compiler {
V(Arm64F32x4Ne) \
V(Arm64F32x4Lt) \
V(Arm64F32x4Le) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -213,6 +220,7 @@ namespace compiler {
V(Arm64I64x2ShrS) \
V(Arm64I64x2Add) \
V(Arm64I64x2Sub) \
+ V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
V(Arm64I64x2Ne) \
V(Arm64I64x2GtS) \
@@ -331,6 +339,7 @@ namespace compiler {
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
+ V(Arm64S8x16Swizzle) \
V(Arm64S8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 7cba2d50ea..b0f9202968 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -71,6 +71,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sxth:
case kArm64Sxth32:
case kArm64Sxtw:
+ case kArm64Sbfx:
case kArm64Sbfx32:
case kArm64Ubfx:
case kArm64Ubfx32:
@@ -142,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ReplaceLane:
case kArm64F64x2Abs:
case kArm64F64x2Neg:
+ case kArm64F64x2Sqrt:
case kArm64F64x2Add:
case kArm64F64x2Sub:
case kArm64F64x2Mul:
@@ -152,6 +154,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Ne:
case kArm64F64x2Lt:
case kArm64F64x2Le:
+ case kArm64F64x2Qfma:
+ case kArm64F64x2Qfms:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -159,6 +163,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4UConvertI32x4:
case kArm64F32x4Abs:
case kArm64F32x4Neg:
+ case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
case kArm64F32x4Add:
@@ -172,6 +177,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Ne:
case kArm64F32x4Lt:
case kArm64F32x4Le:
+ case kArm64F32x4Qfma:
+ case kArm64F32x4Qfms:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -180,6 +187,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2ShrS:
case kArm64I64x2Add:
case kArm64I64x2Sub:
+ case kArm64I64x2Mul:
case kArm64I64x2Eq:
case kArm64I64x2Ne:
case kArm64I64x2GtS:
@@ -298,6 +306,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x16TransposeLeft:
case kArm64S8x16TransposeRight:
case kArm64S8x16Concat:
+ case kArm64S8x16Swizzle:
case kArm64S8x16Shuffle:
case kArm64S32x2Reverse:
case kArm64S16x4Reverse:
@@ -439,6 +448,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Clz:
case kArm64Clz32:
+ case kArm64Sbfx:
case kArm64Sbfx32:
case kArm64Sxtb32:
case kArm64Sxth32:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 4abbd68c49..53a289fe6a 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -153,7 +153,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Arm64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -499,6 +499,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (m.right().HasValue() && (m.right().Value() < 0) &&
+ (m.right().Value() > std::numeric_limits<int>::min()) &&
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -627,9 +628,24 @@ void InstructionSelector::VisitLoad(Node* node) {
#else
UNREACHABLE();
#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kArm64LdrDecompressTaggedSigned;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kArm64LdrDecompressTaggedPointer;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kArm64LdrDecompressAnyTagged;
+ immediate_mode = kLoadStoreImm32;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
@@ -723,7 +739,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
- opcode = kArm64StrW;
+ opcode = kArm64StrCompressTagged;
immediate_mode = kLoadStoreImm32;
break;
#else
@@ -731,7 +747,11 @@ void InstructionSelector::VisitStore(Node* node) {
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kArm64StrCompressTagged;
+ immediate_mode =
+ COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64;
+ break;
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
@@ -770,6 +790,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1048,7 +1072,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+ << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -1091,7 +1116,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
+ << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -1240,7 +1266,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float32Max, kArm64Float32Max) \
V(Float64Max, kArm64Float64Max) \
V(Float32Min, kArm64Float32Min) \
- V(Float64Min, kArm64Float64Min)
+ V(Float64Min, kArm64Float64Min) \
+ V(S8x16Swizzle, kArm64S8x16Swizzle)
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1572,9 +1599,22 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
return;
}
EmitLoad(this, value, opcode, immediate_mode, rep, node);
- } else {
- VisitRR(this, kArm64Sxtw, node);
+ return;
+ }
+
+ if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) {
+ Int32BinopMatcher m(value);
+ if (m.right().HasValue()) {
+ Arm64OperandGenerator g(this);
+ // Mask the shift amount, to keep the same semantics as Word32Sar.
+ int right = m.right().Value() & 0x1F;
+ Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(right), g.TempImmediate(32 - right));
+ return;
+ }
}
+
+ VisitRR(this, kArm64Sxtw, node);
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
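
The new ChangeInt32ToInt64 path above folds a covered Word32Sar with a constant shift into a single Sbfx. A scalar sketch of why the fold is sound, assuming Sbfx(x, lsb, width) extracts width bits at lsb and sign-extends (illustration only, not part of the patch):

#include <cstdint>

// With lsb = k (already masked to 0..31) and width = 32 - k, extracting and
// sign-extending the top 32 - k bits equals Sxtw(Word32Sar(x, k)).
int64_t SbfxModel(int32_t x, int lsb, int width) {
  int64_t wide = static_cast<int64_t>(x) >> lsb;  // arithmetic shift of the widened value
  int s = 64 - width;
  return static_cast<int64_t>(static_cast<uint64_t>(wide) << s) >> s;  // sign-extend from bit width - 1
}
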
@@ -1830,31 +1870,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->EmitWithContinuation(opcode, left, right, cont);
}
-// Shared routine for multiple word compare operations.
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont,
- ImmediateMode immediate_mode) {
- Arm64OperandGenerator g(selector);
-
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- // If one of the two inputs is an immediate, make sure it's on the right.
- if (!g.CanBeImmediate(right, immediate_mode) &&
- g.CanBeImmediate(left, immediate_mode)) {
- cont->Commute();
- std::swap(left, right);
- }
-
- if (g.CanBeImmediate(right, immediate_mode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
- cont);
- }
-}
-
// This function checks whether we can convert:
// ((a <op> b) cmp 0), b.<cond>
// to:
@@ -1986,9 +2001,35 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
selector->EmitWithContinuation(opcode, value, cont);
}
+template <int N>
+struct CbzOrTbzMatchTrait {};
+
+template <>
+struct CbzOrTbzMatchTrait<32> {
+ using IntegralType = uint32_t;
+ using BinopMatcher = Int32BinopMatcher;
+ static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord32And;
+ static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch32;
+ static constexpr ArchOpcode kCompareAndBranchOpcode =
+ kArm64CompareAndBranch32;
+ static constexpr unsigned kSignBit = kWSignBit;
+};
+
+template <>
+struct CbzOrTbzMatchTrait<64> {
+ using IntegralType = uint64_t;
+ using BinopMatcher = Int64BinopMatcher;
+ static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord64And;
+ static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch;
+ static constexpr ArchOpcode kCompareAndBranchOpcode = kArm64CompareAndBranch;
+ static constexpr unsigned kSignBit = kXSignBit;
+};
+
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
// against {value}, depending on the condition.
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
+template <int N>
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
+ typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Branch poisoning requires flags to be set, so when it's enabled for
// a particular branch, we shouldn't be applying the cbz/tbz optimization.
@@ -2007,28 +2048,33 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
if (cont->IsDeoptimize()) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForTbz(cond));
- Int32Matcher m(node);
- if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
- // SignedLessThan(Float64ExtractHighWord32(x), 0) and
- // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
- // check the sign bit of a 64-bit floating point value.
- InstructionOperand temp = g.TempRegister();
- selector->Emit(kArm64U64MoveFloat64, temp,
- g.UseRegister(node->InputAt(0)));
- selector->EmitWithContinuation(kArm64TestAndBranch, temp,
- g.TempImmediate(63), cont);
- return true;
+
+ if (N == 32) {
+ Int32Matcher m(node);
+ if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
+ // SignedLessThan(Float64ExtractHighWord32(x), 0) and
+ // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0)
+ // essentially check the sign bit of a 64-bit floating point value.
+ InstructionOperand temp = g.TempRegister();
+ selector->Emit(kArm64U64MoveFloat64, temp,
+ g.UseRegister(node->InputAt(0)));
+ selector->EmitWithContinuation(kArm64TestAndBranch, temp,
+ g.TempImmediate(kDSignBit), cont);
+ return true;
+ }
}
- selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node),
- g.TempImmediate(31), cont);
+
+ selector->EmitWithContinuation(
+ CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode, g.UseRegister(node),
+ g.TempImmediate(CbzOrTbzMatchTrait<N>::kSignBit), cont);
return true;
}
case kEqual:
case kNotEqual: {
- if (node->opcode() == IrOpcode::kWord32And) {
+ if (node->opcode() == CbzOrTbzMatchTrait<N>::kAndOpcode) {
// Emit a tbz/tbnz if we are comparing with a single-bit mask:
- // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
- Int32BinopMatcher m_and(node);
+ // Branch(WordEqual(WordAnd(x, 1 << N), 1 << N), true, false)
+ typename CbzOrTbzMatchTrait<N>::BinopMatcher m_and(node);
if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
m_and.right().Is(value) && selector->CanCover(user, node)) {
Arm64OperandGenerator g(selector);
@@ -2036,7 +2082,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
// the opposite here so negate the condition.
cont->Negate();
selector->EmitWithContinuation(
- kArm64TestAndBranch32, g.UseRegister(m_and.left().node()),
+ CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode,
+ g.UseRegister(m_and.left().node()),
g.TempImmediate(base::bits::CountTrailingZeros(value)), cont);
return true;
}
@@ -2048,7 +2095,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
if (value != 0) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
- EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
+ EmitBranchOrDeoptimize(selector,
+ CbzOrTbzMatchTrait<N>::kCompareAndBranchOpcode,
g.UseRegister(node), cont);
return true;
}
@@ -2057,20 +2105,50 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
}
}
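For context on the sign-bit cases handled by TryEmitCbzOrTbz above (my illustration, not text from the patch): for two's-complement values, SignedLessThan(x, 0) holds exactly when the top bit of x is set, so the comparison can become a tbnz/tbz on bit 31 of a W register or bit 63 of an X register without materializing flags; the Float64ExtractHighWord32 special case applies the same idea to bit 63 of the raw double bits. In scalar form:

#include <cstdint>

// The bit indices match kWSignBit (31), kXSignBit (63) and kDSignBit (63)
// used by the arm64 backend.
bool IsNegative32(uint32_t bits) { return ((bits >> 31) & 1) != 0; }
bool IsNegative64(uint64_t bits) { return ((bits >> 63) & 1) != 0; }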
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ ImmediateMode immediate_mode) {
+ Arm64OperandGenerator g(selector);
+
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // If one of the two inputs is an immediate, make sure it's on the right.
+ if (!g.CanBeImmediate(right, immediate_mode) &&
+ g.CanBeImmediate(left, immediate_mode)) {
+ cont->Commute();
+ std::swap(left, right);
+ }
+
+ if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ Int64Matcher m(right);
+ if (m.HasValue()) {
+ if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node,
+ cont->condition(), cont)) {
+ return;
+ }
+ }
+ }
+
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseOperand(right, immediate_mode), cont);
+}
+
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
- cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(),
+ node, cond, cont)) {
return;
}
} else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
- commuted_cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(),
+ node, commuted_cond, cont)) {
return;
}
}
@@ -2378,13 +2456,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm);
}
- // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if ((cont->IsBranch() || cont->IsDeoptimize()) &&
- !cont->IsPoisoned()) {
- EmitBranchOrDeoptimize(this, kArm64CompareAndBranch,
- g.UseRegister(left), cont);
- return;
- }
}
return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm);
}
@@ -3054,10 +3125,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs, kArm64F64x2Abs) \
V(F64x2Neg, kArm64F64x2Neg) \
+ V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
V(F32x4Abs, kArm64F32x4Abs) \
V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(I64x2Neg, kArm64I64x2Neg) \
@@ -3236,6 +3309,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+void InstructionSelector::VisitI64x2Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kArm64I64x2Mul, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
@@ -3243,6 +3324,19 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+#define VISIT_SIMD_QFMOP(op) \
+ void InstructionSelector::Visit##op(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Emit(kArm64##op, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
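The four Qfma/Qfms visitors above share one three-operand emit shape. As a rough per-lane model (an assumption of mine based on the operand shape, so treat the exact operand order and fusion behaviour as unverified rather than as the patch's definition):

#include <cmath>

// One plausible per-lane reading: accumulate +/- (lhs * rhs) into acc, where
// the multiply-add may or may not be fused depending on the target.
double QfmaLane(double acc, double lhs, double rhs) {
  return std::fma(lhs, rhs, acc);
}
double QfmsLane(double acc, double lhs, double rhs) {
  return std::fma(-lhs, rhs, acc);
}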
namespace {
struct ShuffleEntry {
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 2bfb009980..530dc0a813 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -26,7 +26,7 @@ class InstructionOperandConverter {
// -- Instruction operand accesses with conversions --------------------------
- Register InputRegister(size_t index) {
+ Register InputRegister(size_t index) const {
return ToRegister(instr_->InputAt(index));
}
@@ -96,7 +96,7 @@ class InstructionOperandConverter {
return ToRpoNumber(instr_->InputAt(index));
}
- Register OutputRegister(size_t index = 0) {
+ Register OutputRegister(size_t index = 0) const {
return ToRegister(instr_->OutputAt(index));
}
@@ -130,7 +130,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToRpoNumber();
}
- Register ToRegister(InstructionOperand* op) {
+ Register ToRegister(InstructionOperand* op) const {
return LocationOperand::cast(op)->GetRegister();
}
@@ -146,7 +146,7 @@ class InstructionOperandConverter {
return LocationOperand::cast(op)->GetSimd128Register();
}
- Constant ToConstant(InstructionOperand* op) {
+ Constant ToConstant(InstructionOperand* op) const {
if (op->IsImmediate()) {
return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op));
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index e7702bcdf6..43eb4a1f15 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index e9ebf67590..d56b1edae0 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_
+#include <memory>
+
#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/safepoint-table.h"
diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc
index 064501b097..293fc9352c 100644
--- a/deps/v8/src/compiler/backend/frame-elider.cc
+++ b/deps/v8/src/compiler/backend/frame-elider.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/frame-elider.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 4542da643b..068268a3da 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -479,17 +479,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
-#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
- do { \
- Register dst = i.OutputRegister(); \
- Operand src = i.InputOperand(0); \
- Register tmp = i.TempRegister(0); \
- __ mov(tmp, Immediate(1)); \
- __ xor_(dst, dst); \
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
- __ opcode(kScratchDoubleReg, src); \
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
- __ cmov(zero, dst, tmp); \
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ Operand src = i.InputOperand(0); \
+ Register tmp = i.TempRegister(0); \
+ XMMRegister tmp_simd = i.TempSimd128Register(1); \
+ __ mov(tmp, Immediate(1)); \
+ __ xor_(dst, dst); \
+ __ Pxor(tmp_simd, tmp_simd); \
+ __ opcode(tmp_simd, src); \
+ __ Ptest(tmp_simd, tmp_simd); \
+ __ cmov(zero, dst, tmp); \
} while (false)
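A scalar reading of the rewritten all-true reduction (my sketch for the 32-bit-lane variant, not code from the patch): the vector compare against zero produces an all-ones lane exactly where the input lane was zero, ptest reports whether that result is entirely zero, and the cmov turns the answer into 0 or 1. Equivalent per-vector logic:

#include <cstdint>

// Returns 1 iff no lane of the input is zero, i.e. all lanes are "true".
int AllTrueI32x4(const uint32_t lanes[4]) {
  for (int i = 0; i < 4; ++i) {
    if (lanes[i] == 0) return 0;
  }
  return 1;
}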
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1266,16 +1267,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 33);
+ __ andps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 31);
+ __ xorps(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat32Round: {
@@ -1444,16 +1447,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 1);
+ __ andpd(i.OutputDoubleRegister(), tmp);
break;
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
+ __ xorpd(i.OutputDoubleRegister(), tmp);
break;
}
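The abs/neg cases above keep the existing mask-building trick, now in a dedicated temp instead of kScratchDoubleReg: pcmpeqd fills the register with ones and the 64-bit shifts carve out either the sign bit or its complement. For the double-precision cases the resulting constants are (an illustrative check of the psrlq/psllq immediates, not code from the patch):

#include <cstdint>

constexpr uint64_t kAllOnes = ~uint64_t{0};       // pcmpeqd tmp, tmp
constexpr uint64_t kF64AbsMask = kAllOnes >> 1;   // psrlq tmp, 1
constexpr uint64_t kF64SignBit = kAllOnes << 63;  // psllq tmp, 63

static_assert(kF64AbsMask == 0x7fffffffffffffffull,
              "andpd with this mask clears the sign bit");
static_assert(kF64SignBit == 0x8000000000000000ull,
              "xorpd with this mask flips the sign bit");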
case kSSEFloat64Sqrt:
@@ -1476,13 +1481,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat32ToUint32:
- __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg);
+ __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
+ i.TempSimd128Register(0));
break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32:
- __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg);
+ __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
+ i.TempSimd128Register(0));
break;
case kSSEInt32ToFloat32:
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
@@ -1577,34 +1584,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 33);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 31);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psrlq(tmp, 1);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
break;
}
case kSSEFloat64SilenceNaN:
@@ -1825,6 +1836,164 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kSSEF64x2Splat: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ shufpd(dst, dst, 0x0);
+ break;
+ }
+ case kAVXF64x2Splat: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputDoubleRegister(0);
+ __ vshufpd(i.OutputSimd128Register(), src, src, 0x0);
+ break;
+ }
+ case kSSEF64x2ExtractLane: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputDoubleRegister();
+ int8_t lane = i.InputInt8(1);
+ if (lane != 0) {
+ DCHECK_LT(lane, 4);
+ __ shufpd(dst, dst, lane);
+ }
+ break;
+ }
+ case kAVXF64x2ExtractLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputDoubleRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ if (lane == 0) {
+ if (dst != src) __ vmovapd(dst, src);
+ } else {
+ DCHECK_LT(lane, 4);
+ __ vshufpd(dst, src, src, lane);
+ }
+ break;
+ }
+ case kSSEF64x2ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ int8_t lane = i.InputInt8(1);
+ DoubleRegister rep = i.InputDoubleRegister(2);
+
+ // insertps takes a mask which contains (high to low):
+      // - 2 bits specifying the source float element to copy
+      // - 2 bits specifying the destination float element to write to
+ // - 4 bits specifying which elements of the destination to zero
+ DCHECK_LT(lane, 2);
+ if (lane == 0) {
+ __ insertps(dst, rep, 0b00000000);
+ __ insertps(dst, rep, 0b01010000);
+ } else {
+ __ insertps(dst, rep, 0b00100000);
+ __ insertps(dst, rep, 0b01110000);
+ }
+ break;
+ }
+ case kAVXF64x2ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ DoubleRegister rep = i.InputDoubleRegister(2);
+
+ DCHECK_LT(lane, 2);
+ if (lane == 0) {
+ __ vinsertps(dst, src, rep, 0b00000000);
+ __ vinsertps(dst, src, rep, 0b01010000);
+ } else {
+ __ vinsertps(dst, src, rep, 0b10100000);
+ __ vinsertps(dst, src, rep, 0b11110000);
+ }
+ break;
+ }
+ case kIA32F64x2Sqrt: {
+ __ Sqrtpd(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32F64x2Add: {
+ __ Addpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Sub: {
+ __ Subpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Mul: {
+ __ Mulpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Div: {
+ __ Divpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Min: {
+ Operand src1 = i.InputOperand(1);
+ XMMRegister dst = i.OutputSimd128Register(),
+ src = i.InputSimd128Register(0),
+ tmp = i.TempSimd128Register(0);
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform minpd in both orders, merge the results, and adjust.
+ __ Movapd(tmp, src1);
+ __ Minpd(tmp, tmp, src);
+ __ Minpd(dst, src, src1);
+      // Propagate -0's and NaNs, which may be non-canonical.
+ __ Orpd(tmp, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmpunordpd(dst, dst, tmp);
+ __ Orpd(tmp, dst);
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, tmp);
+ break;
+ }
+ case kIA32F64x2Max: {
+ Operand src1 = i.InputOperand(1);
+ XMMRegister dst = i.OutputSimd128Register(),
+ src = i.InputSimd128Register(0),
+ tmp = i.TempSimd128Register(0);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform maxpd in both orders, merge the results, and adjust.
+ __ Movapd(tmp, src1);
+ __ Maxpd(tmp, tmp, src);
+ __ Maxpd(dst, src, src1);
+ // Find discrepancies.
+ __ Xorpd(dst, tmp);
+ // Propagate NaNs, which may be non-canonical.
+ __ Orpd(tmp, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ __ Subpd(tmp, tmp, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmpunordpd(dst, dst, tmp);
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, tmp);
+ break;
+ }
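The min/max sequences above work around asymmetric x86 semantics: minpd/maxpd return the second operand whenever either input is NaN and make no distinction between -0 and +0, while the wasm lane operation needs a canonical NaN result and treats -0 as less than +0. A scalar reference for what one lane should produce (my sketch of the intended semantics, not code from the patch):

#include <cmath>
#include <limits>

double WasmF64MinLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // -0 below +0
  return a < b ? a : b;
}

double WasmF64MaxLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? b : a;  // +0 above -0
  return a > b ? a : b;
}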
+ case kIA32F64x2Eq: {
+ __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Ne: {
+ __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Lt: {
+ __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32F64x2Le: {
+ __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kSSEF32x4Splat: {
DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
XMMRegister dst = i.OutputSimd128Register();
@@ -1951,6 +2120,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(0));
break;
}
+ case kSSEF32x4Sqrt: {
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kAVXF32x4Sqrt: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32F32x4RecipApprox: {
__ Rcpps(i.OutputSimd128Register(), i.InputOperand(0));
break;
@@ -2212,28 +2390,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI32x4Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ pslld(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
case kSSEI32x4ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ psrad(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
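The and_(shift, 31) / and_(shift, 15) / and_(shift, 7) additions in these shift cases all encode the same rule: the wasm shift count is taken modulo the lane width, whereas the x86 vector shifts treat any count at or above the lane width as shifting every bit out. A one-line scalar model of the intended behaviour (illustration only):

#include <cstdint>

uint32_t WasmI32x4ShlLane(uint32_t lane, uint32_t count) {
  return lane << (count & 31);  // mirrors the masking added before movd/pslld
}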
@@ -2430,14 +2620,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI32x4ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ psrld(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI32x4ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ and_(shift, 31);
+ __ movd(tmp, shift);
__ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2514,7 +2710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I16x8ExtractLane: {
Register dst = i.OutputRegister();
__ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsx_w(dst, dst);
break;
}
case kSSEI16x8ReplaceLane: {
@@ -2553,28 +2748,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psllw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
case kSSEI16x8ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psraw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2745,14 +2952,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ psrlw(i.OutputSimd128Register(), tmp);
break;
}
case kAVXI16x8ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister tmp = i.TempSimd128Register(0);
- __ movd(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ and_(shift, 15);
+ __ movd(tmp, shift);
__ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
break;
}
@@ -2875,7 +3088,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32I8x16ExtractLane: {
Register dst = i.OutputRegister();
__ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsx_b(dst, dst);
break;
}
case kSSEI8x16ReplaceLane: {
@@ -2919,6 +3131,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register shift = i.InputRegister(1);
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Take shift value modulo 8.
+ __ and_(shift, 7);
// Mask off the unwanted bits before word-shifting.
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
__ mov(tmp, shift);
@@ -2938,6 +3152,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register shift = i.InputRegister(1);
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Take shift value modulo 8.
+ __ and_(shift, 7);
// Mask off the unwanted bits before word-shifting.
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mov(tmp, shift);
@@ -2959,6 +3175,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpckhbw(kScratchDoubleReg, dst);
__ punpcklbw(dst, dst);
__ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
__ add(tmp, Immediate(8));
__ movd(tmp_simd, tmp);
__ psraw(kScratchDoubleReg, tmp_simd);
@@ -3223,6 +3441,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpckhbw(kScratchDoubleReg, dst);
__ punpcklbw(dst, dst);
__ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
__ add(tmp, Immediate(8));
__ movd(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
@@ -3365,6 +3585,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2));
break;
}
+ case kIA32S8x16Swizzle: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister mask = i.TempSimd128Register(0);
+
+      // Out-of-range indices should return 0. Add 112 (0x70) with unsigned
+      // saturation so that any index > 15 ends up with its top bit set, and
+      // pshufb will then zero that lane.
+ __ Move(mask, (uint32_t)0x70707070);
+ __ Pshufd(mask, mask, 0x0);
+ __ Paddusb(mask, i.InputSimd128Register(1));
+ __ Pshufb(dst, mask);
+ break;
+ }
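To spell out the swizzle index trick (my scalar model, not part of the patch): paddusb saturates at 255, so adding 0x70 leaves in-range indices 0..15 with a clear top bit and pushes every out-of-range index to 0x80 or above, and pshufb zeroes any lane whose index byte has its top bit set:

#include <algorithm>
#include <cstdint>

uint8_t SwizzleLane(const uint8_t src[16], uint8_t index) {
  // paddusb with 0x70, then the per-byte pshufb selection rule.
  uint8_t m = static_cast<uint8_t>(std::min<int>(index + 0x70, 0xFF));
  return (m & 0x80) ? 0 : src[m & 0x0F];
}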
case kIA32S8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
Operand src0 = i.InputOperand(0);
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 7530c716b8..a77fb8cd37 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -116,6 +116,23 @@ namespace compiler {
V(IA32PushSimd128) \
V(IA32Poke) \
V(IA32Peek) \
+ V(SSEF64x2Splat) \
+ V(AVXF64x2Splat) \
+ V(SSEF64x2ExtractLane) \
+ V(AVXF64x2ExtractLane) \
+ V(SSEF64x2ReplaceLane) \
+ V(AVXF64x2ReplaceLane) \
+ V(IA32F64x2Sqrt) \
+ V(IA32F64x2Add) \
+ V(IA32F64x2Sub) \
+ V(IA32F64x2Mul) \
+ V(IA32F64x2Div) \
+ V(IA32F64x2Min) \
+ V(IA32F64x2Max) \
+ V(IA32F64x2Eq) \
+ V(IA32F64x2Ne) \
+ V(IA32F64x2Lt) \
+ V(IA32F64x2Le) \
V(SSEF32x4Splat) \
V(AVXF32x4Splat) \
V(SSEF32x4ExtractLane) \
@@ -129,6 +146,8 @@ namespace compiler {
V(AVXF32x4Abs) \
V(SSEF32x4Neg) \
V(AVXF32x4Neg) \
+ V(SSEF32x4Sqrt) \
+ V(AVXF32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
V(SSEF32x4Add) \
@@ -313,6 +332,7 @@ namespace compiler {
V(AVXS128Xor) \
V(SSES128Select) \
V(AVXS128Select) \
+ V(IA32S8x16Swizzle) \
V(IA32S8x16Shuffle) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index c2097a6691..287eb49a48 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kSSEF64x2Splat:
+ case kAVXF64x2Splat:
+ case kSSEF64x2ExtractLane:
+ case kAVXF64x2ExtractLane:
+ case kSSEF64x2ReplaceLane:
+ case kAVXF64x2ReplaceLane:
+ case kIA32F64x2Sqrt:
+ case kIA32F64x2Add:
+ case kIA32F64x2Sub:
+ case kIA32F64x2Mul:
+ case kIA32F64x2Div:
+ case kIA32F64x2Min:
+ case kIA32F64x2Max:
+ case kIA32F64x2Eq:
+ case kIA32F64x2Ne:
+ case kIA32F64x2Lt:
+ case kIA32F64x2Le:
case kSSEF32x4Splat:
case kAVXF32x4Splat:
case kSSEF32x4ExtractLane:
@@ -110,6 +127,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Abs:
case kSSEF32x4Neg:
case kAVXF32x4Neg:
+ case kSSEF32x4Sqrt:
+ case kAVXF32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
case kSSEF32x4Add:
@@ -294,6 +313,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Xor:
case kSSES128Select:
case kAVXS128Select:
+ case kIA32S8x16Swizzle:
case kIA32S8x16Shuffle:
case kIA32S32x4Swizzle:
case kIA32S32x4Shuffle:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index ebef39a93a..a24727aba2 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -200,12 +200,27 @@ namespace {
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister()};
Node* input = node->InputAt(0);
// We have to use a byte register as input to movsxb.
InstructionOperand input_op =
opcode == kIA32Movsxbl ? g.UseFixed(input, eax) : g.Use(input);
- selector->Emit(opcode, g.DefineAsRegister(node), input_op, arraysize(temps),
+ selector->Emit(opcode, g.DefineAsRegister(node), input_op);
+}
+
+void VisitROWithTemp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void VisitROWithTempSimd(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps),
temps);
}
@@ -231,10 +246,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
+ arraysize(temps), temps);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(input), arraysize(temps), temps);
}
}
@@ -804,12 +822,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
V(BitcastFloat32ToInt32, kIA32BitcastFI) \
@@ -819,7 +833,15 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
V(SignExtendWord8ToInt32, kIA32Movsxbl) \
- V(SignExtendWord16ToInt32, kIA32Movsxwl)
+ V(SignExtendWord16ToInt32, kIA32Movsxwl) \
+ V(F64x2Sqrt, kIA32F64x2Sqrt)
+
+#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)
+
+#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)
#define RR_OP_LIST(V) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -841,13 +863,23 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+ V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
+ V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
+ V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -857,6 +889,22 @@ RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
#undef RO_OP_LIST
+#define RO_WITH_TEMP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitROWithTemp(this, node, opcode); \
+ }
+RO_WITH_TEMP_OP_LIST(RO_WITH_TEMP_VISITOR)
+#undef RO_WITH_TEMP_VISITOR
+#undef RO_WITH_TEMP_OP_LIST
+
+#define RO_WITH_TEMP_SIMD_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitROWithTempSimd(this, node, opcode); \
+ }
+RO_WITH_TEMP_SIMD_OP_LIST(RO_WITH_TEMP_SIMD_VISITOR)
+#undef RO_WITH_TEMP_SIMD_VISITOR
+#undef RO_WITH_TEMP_SIMD_OP_LIST
+
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
VisitRR(this, node, opcode); \
@@ -890,6 +938,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -1971,6 +2023,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_UNOP_PREFIX_LIST(V) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(S128Not)
#define SIMD_ANYTRUE_LIST(V) \
@@ -1995,6 +2048,43 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16ShrS) \
V(I8x16ShrU)
+void InstructionSelector::VisitF64x2Min(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+
+ if (IsSupported(AVX)) {
+ Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitF64x2Max(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUnique(node->InputAt(1));
+ if (IsSupported(AVX)) {
+ Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitF64x2Splat(Node* node) {
+ VisitRRSimd(this, node, kAVXF64x2Splat, kSSEF64x2Splat);
+}
+
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
+ VisitRRISimd(this, node, kAVXF64x2ExtractLane, kSSEF64x2ExtractLane);
+}
+
void InstructionSelector::VisitF32x4Splat(Node* node) {
VisitRRSimd(this, node, kAVXF32x4Splat, kSSEF32x4Splat);
}
@@ -2086,6 +2176,28 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
+// The difference between this and VISIT_SIMD_REPLACE_LANE is that this forces
+// operand2 to be UseRegister, because the codegen relies on insertps using
+// registers.
+// TODO(v8:9764) Remove this UseRegister requirement
+#define VISIT_SIMD_REPLACE_LANE_USE_REG(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ InstructionOperand operand2 = g.UseRegister(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
+ }
+VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2)
+#undef VISIT_SIMD_REPLACE_LANE_USE_REG
+
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \
@@ -2132,12 +2244,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
#undef VISIT_SIMD_ANYTRUE
#undef SIMD_ANYTRUE_LIST
-#define VISIT_SIMD_ALLTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister()}; \
- Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), \
- arraysize(temps), temps); \
+#define VISIT_SIMD_ALLTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
+ Emit(kIA32##Opcode, g.DefineAsRegister(node), \
+ g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \
}
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
@@ -2489,6 +2601,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kIA32S8x16Swizzle, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index dc66813740..d4920cd575 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/instruction-scheduler.h"
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/utils/random-number-generator.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index a3f62e7ba4..13ea049eba 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
// Helper struct containing data about a table or lookup switch.
class SwitchInfo {
public:
- SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references)
- int32_t min_value, int32_t max_value, BasicBlock* default_branch)
+ SwitchInfo(ZoneVector<CaseInfo> const& cases, int32_t min_value,
+ int32_t max_value, BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
max_value_(max_value),
@@ -193,17 +193,6 @@ class OperandGenerator {
reg.code(), GetVReg(node)));
}
- InstructionOperand UseExplicit(LinkageLocation location) {
- MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
- if (location.IsRegister()) {
- return ExplicitOperand(LocationOperand::REGISTER, rep,
- location.AsRegister());
- } else {
- return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
- location.GetLocation());
- }
- }
-
InstructionOperand UseImmediate(int immediate) {
return sequence()->AddImmediate(Constant(immediate));
}
@@ -275,6 +264,16 @@ class OperandGenerator {
InstructionOperand::kInvalidVirtualRegister);
}
+ template <typename FPRegType>
+ InstructionOperand TempFpRegister(FPRegType reg) {
+ UnallocatedOperand op =
+ UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER, reg.code(),
+ sequence()->NextVirtualRegister());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128,
+ op.virtual_register());
+ return op;
+ }
+
InstructionOperand TempImmediate(int32_t imm) {
return sequence()->AddImmediate(Constant(imm));
}
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 43193ec2b1..22d81c0c55 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -6,7 +6,7 @@
#include <limits>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
@@ -1439,6 +1439,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64ReverseBytes:
return MarkAsWord64(node), VisitWord64ReverseBytes(node);
+ case IrOpcode::kSimd128ReverseBytes:
+ return MarkAsSimd128(node), VisitSimd128ReverseBytes(node);
case IrOpcode::kInt64AbsWithOverflow:
return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
@@ -1502,7 +1504,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
@@ -1857,6 +1859,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Abs(node);
case IrOpcode::kF64x2Neg:
return MarkAsSimd128(node), VisitF64x2Neg(node);
+ case IrOpcode::kF64x2Sqrt:
+ return MarkAsSimd128(node), VisitF64x2Sqrt(node);
case IrOpcode::kF64x2Add:
return MarkAsSimd128(node), VisitF64x2Add(node);
case IrOpcode::kF64x2Sub:
@@ -1877,6 +1881,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Lt(node);
case IrOpcode::kF64x2Le:
return MarkAsSimd128(node), VisitF64x2Le(node);
+ case IrOpcode::kF64x2Qfma:
+ return MarkAsSimd128(node), VisitF64x2Qfma(node);
+ case IrOpcode::kF64x2Qfms:
+ return MarkAsSimd128(node), VisitF64x2Qfms(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1891,6 +1899,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Abs(node);
case IrOpcode::kF32x4Neg:
return MarkAsSimd128(node), VisitF32x4Neg(node);
+ case IrOpcode::kF32x4Sqrt:
+ return MarkAsSimd128(node), VisitF32x4Sqrt(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
case IrOpcode::kF32x4RecipSqrtApprox:
@@ -1917,6 +1927,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
+ case IrOpcode::kF32x4Qfma:
+ return MarkAsSimd128(node), VisitF32x4Qfma(node);
+ case IrOpcode::kF32x4Qfms:
+ return MarkAsSimd128(node), VisitF32x4Qfms(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2ExtractLane:
@@ -2137,6 +2151,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Not(node);
case IrOpcode::kS128Select:
return MarkAsSimd128(node), VisitS128Select(node);
+ case IrOpcode::kS8x16Swizzle:
+ return MarkAsSimd128(node), VisitS8x16Swizzle(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
case IrOpcode::kS1x2AnyTrue:
@@ -2286,8 +2302,8 @@ void InstructionSelector::VisitFloat64Tanh(Node* node) {
VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
}
-void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
- InstructionOperand& index_operand) {
+void InstructionSelector::EmitTableSwitch(
+ const SwitchInfo& sw, InstructionOperand const& index_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.value_range();
DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
@@ -2304,8 +2320,8 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand) {
+void InstructionSelector::EmitLookupSwitch(
+ const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
size_t input_count = 2 + sw.case_count() * 2;
@@ -2322,7 +2338,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
void InstructionSelector::EmitBinarySearchSwitch(
- const SwitchInfo& sw, InstructionOperand& value_operand) {
+ const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.case_count() * 2;
DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
@@ -2607,21 +2623,25 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#if !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
@@ -2630,6 +2650,7 @@ void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
@@ -2639,8 +2660,11 @@ void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index eb3e098427..e951c90f95 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -502,15 +502,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FeedbackSource const& feedback,
Node* frame_state);
- void EmitTableSwitch(
- const SwitchInfo& sw,
- InstructionOperand& index_operand); // NOLINT(runtime/references)
- void EmitLookupSwitch(
- const SwitchInfo& sw,
- InstructionOperand& value_operand); // NOLINT(runtime/references)
- void EmitBinarySearchSwitch(
- const SwitchInfo& sw,
- InstructionOperand& value_operand); // NOLINT(runtime/references)
+ void EmitTableSwitch(const SwitchInfo& sw,
+ InstructionOperand const& index_operand);
+ void EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand const& value_operand);
+ void EmitBinarySearchSwitch(const SwitchInfo& sw,
+ InstructionOperand const& value_operand);
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 06158b0c72..076f1b596e 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -168,7 +168,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
- case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
@@ -192,9 +191,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
os << "[" << Simd128Register::from_code(allocated.register_code())
<< "|R";
}
- if (allocated.IsExplicit()) {
- os << "|E";
- }
switch (allocated.representation()) {
case MachineRepresentation::kNone:
os << "|-";
@@ -294,17 +290,6 @@ void ParallelMove::PrepareInsertAfter(
if (replacement != nullptr) move->set_source(replacement->source());
}
-ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
- int index)
- : LocationOperand(EXPLICIT, kind, rep, index) {
- DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
- GetRegConfig()->IsAllocatableGeneralCode(index));
- DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
- GetRegConfig()->IsAllocatableFloatCode(index));
- DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
- GetRegConfig()->IsAllocatableDoubleCode(index));
-}
-
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index f5f7f64c51..321f069531 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -43,9 +43,8 @@ class V8_EXPORT_PRIVATE InstructionOperand {
CONSTANT,
IMMEDIATE,
// Location operand kinds.
- EXPLICIT,
ALLOCATED,
- FIRST_LOCATION_OPERAND_KIND = EXPLICIT
+ FIRST_LOCATION_OPERAND_KIND = ALLOCATED
// Location operand kinds must be last.
};
@@ -68,11 +67,6 @@ class V8_EXPORT_PRIVATE InstructionOperand {
// embedded directly in instructions, e.g. small integers and on some
// platforms Objects.
INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
- // ExplicitOperands do not participate in register allocation. They are
- // created by the instruction selector for direct access to registers and
- // stack slots, completely bypassing the register allocator. They are never
- // associated with a virtual register
- INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT)
// AllocatedOperands are registers or stack slots that are assigned by the
// register allocator and are always associated with a virtual register.
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
@@ -515,19 +509,6 @@ class LocationOperand : public InstructionOperand {
using IndexField = BitField64<int32_t, 35, 29>;
};
-class V8_EXPORT_PRIVATE ExplicitOperand
- : public NON_EXPORTED_BASE(LocationOperand) {
- public:
- ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
-
- static ExplicitOperand* New(Zone* zone, LocationKind kind,
- MachineRepresentation rep, int index) {
- return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
- }
-
- INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT)
-};
-
class AllocatedOperand : public LocationOperand {
public:
AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
@@ -643,7 +624,7 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
}
return InstructionOperand::KindField::update(
LocationOperand::RepresentationField::update(this->value_, canonical),
- LocationOperand::EXPLICIT);
+ LocationOperand::ALLOCATED);
}
return this->value_;
}
@@ -776,11 +757,11 @@ class V8_EXPORT_PRIVATE Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
- DCHECK(i < OutputCount());
+ DCHECK_LT(i, OutputCount());
return &operands_[i];
}
InstructionOperand* OutputAt(size_t i) {
- DCHECK(i < OutputCount());
+ DCHECK_LT(i, OutputCount());
return &operands_[i];
}
@@ -790,21 +771,21 @@ class V8_EXPORT_PRIVATE Instruction final {
size_t InputCount() const { return InputCountField::decode(bit_field_); }
const InstructionOperand* InputAt(size_t i) const {
- DCHECK(i < InputCount());
+ DCHECK_LT(i, InputCount());
return &operands_[OutputCount() + i];
}
InstructionOperand* InputAt(size_t i) {
- DCHECK(i < InputCount());
+ DCHECK_LT(i, InputCount());
return &operands_[OutputCount() + i];
}
size_t TempCount() const { return TempCountField::decode(bit_field_); }
const InstructionOperand* TempAt(size_t i) const {
- DCHECK(i < TempCount());
+ DCHECK_LT(i, TempCount());
return &operands_[OutputCount() + InputCount() + i];
}
InstructionOperand* TempAt(size_t i) {
- DCHECK(i < TempCount());
+ DCHECK_LT(i, TempCount());
return &operands_[OutputCount() + InputCount() + i];
}
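
The DCHECK(i < Count()) to DCHECK_LT(i, Count()) changes above are the usual diagnostics cleanup: a two-argument comparison macro can report the values of both operands when it fails, while a plain boolean check can only echo the stringified condition. Below is a minimal sketch of that idea with a made-up SKETCH_DCHECK_LT macro; it is not V8's actual macro machinery, only an illustration of why the two-argument form gives better failure messages.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for a two-argument check macro: on failure it prints
// the values of both sides, not just the text of the condition.
#define SKETCH_DCHECK_LT(lhs, rhs)                                      \
  do {                                                                  \
    auto lhs_value = (lhs);                                             \
    auto rhs_value = (rhs);                                             \
    if (!(lhs_value < rhs_value)) {                                     \
      std::fprintf(stderr, "Check failed: %s < %s (%lld vs. %lld)\n",   \
                   #lhs, #rhs, static_cast<long long>(lhs_value),       \
                   static_cast<long long>(rhs_value));                  \
      std::abort();                                                     \
    }                                                                   \
  } while (false)

int main() {
  std::size_t output_count = 2;
  SKETCH_DCHECK_LT(std::size_t{1}, output_count);  // passes silently
  SKETCH_DCHECK_LT(std::size_t{3}, output_count);  // aborts, printing "3 vs. 2"
}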
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index dfb917a58c..ee195bf51e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -69,11 +69,11 @@ bool IsBlockWithBranchPoisoning(InstructionSequence* code,
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& result,
+ ZoneVector<RpoNumber>* result,
InstructionSequence* code,
bool frame_at_start) {
ZoneStack<RpoNumber> stack(local_zone);
- JumpThreadingState state = {false, result, stack};
+ JumpThreadingState state = {false, *result, stack};
state.Clear(code->InstructionBlockCount());
// Iterate over the blocks forward, pushing the blocks onto the stack.
@@ -135,15 +135,15 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
}
#ifdef DEBUG
- for (RpoNumber num : result) {
+ for (RpoNumber num : *result) {
DCHECK(num.IsValid());
}
#endif
if (FLAG_trace_turbo_jt) {
- for (int i = 0; i < static_cast<int>(result.size()); i++) {
+ for (int i = 0; i < static_cast<int>(result->size()); i++) {
TRACE("B%d ", i);
- int to = result[i].ToInt();
+ int to = (*result)[i].ToInt();
if (i != to) {
TRACE("-> B%d\n", to);
} else {
@@ -156,7 +156,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
}
void JumpThreading::ApplyForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& result,
+ ZoneVector<RpoNumber> const& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index ce60ebcb2e..ce9e394924 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,17 +17,14 @@ class V8_EXPORT_PRIVATE JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(
- Zone* local_zone,
- ZoneVector<RpoNumber>& result, // NOLINT(runtime/references)
- InstructionSequence* code, bool frame_at_start);
+ static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>* result,
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(
- Zone* local_zone,
- ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references)
- InstructionSequence* code);
+ static void ApplyForwarding(Zone* local_zone,
+ ZoneVector<RpoNumber> const& forwarding,
+ InstructionSequence* code);
};
} // namespace compiler
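
The jump-threading changes follow the pattern used throughout this patch: non-const reference parameters, previously waived with NOLINT(runtime/references), become a pointer when the callee mutates the argument and a const reference when it only reads it. A minimal sketch of what that buys at the call site, using stand-in functions and std::vector<int> rather than the real JumpThreading/RpoNumber declarations:

#include <vector>

// Old style: mutation of `result` is invisible at the call site.
bool ComputeForwardingByRef(std::vector<int>& result) {
  result.assign(4, 0);
  return true;
}

// New style: the caller writes `&result`, so the output parameter is obvious.
bool ComputeForwardingByPtr(std::vector<int>* result) {
  result->assign(4, 0);
  return true;
}

// Purely read arguments become const references instead.
void ApplyForwarding(const std::vector<int>& forwarding) {
  (void)forwarding;  // read-only use
}

int main() {
  std::vector<int> forwarding;
  ComputeForwardingByRef(forwarding);   // looks like a pure read
  ComputeForwardingByPtr(&forwarding);  // mutation is explicit
  ApplyForwarding(forwarding);
}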
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 239075392a..ee23402e69 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -265,34 +265,33 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(
- bool& predicate, // NOLINT(runtime/references)
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kNotEqual:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterThanOrEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessThanOrEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
- predicate = true;
+ *predicate = true;
break;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -303,9 +302,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -780,12 +779,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- Label return_location;
- if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // from start_call to return address.
+ int offset = 40;
+#if V8_HOST_ARCH_MIPS
+ if (__ emit_debug_code()) {
+ offset += 16;
+ }
+#endif
+ if (isWasmCapiFunction) {
// Put the return address in a stack slot.
- __ LoadAddress(kScratchReg, &return_location);
- __ sw(kScratchReg,
- MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(kScratchReg, ra);
+ __ bind(&start_call);
+ __ nal();
+ __ nop();
+ __ Addu(ra, ra, offset - 8); // 8 = nop + nal
+ __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(ra, kScratchReg);
}
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
@@ -794,7 +806,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
- __ bind(&return_location);
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ }
+
RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1179,7 +1194,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
@@ -1239,7 +1254,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
@@ -2038,6 +2053,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
+ case kMipsF32x4Sqrt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsF32x4RecipApprox: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -3026,7 +3046,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMipsCmpS ||
instr->arch_opcode() == kMipsCmpD) {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
@@ -3116,7 +3136,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMipsCmpS:
case kMipsCmpD: {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
@@ -3314,7 +3334,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
__ li(result, Operand(1));
if (predicate) {
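
For the Wasm C-API path, kArchCallCFunction no longer materializes a label address with LoadAddress. It instead executes nal, which on MIPS deposits the address of the instruction after its delay slot (start_call + 8, the "8 = nop + nal" in the patch) into ra, then adds a fixed byte offset to reach the eventual return address; the CHECK_EQ against SizeOfCodeGeneratedSince validates that offset. A small sketch of the arithmetic only, assuming 4-byte instructions and an offset measured from start_call as in the patch:

#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;  // every MIPS instruction is 4 bytes

// start_call: address where the label is bound (the nal instruction).
// offset_bytes: distance from start_call to the return address
// (40 here, plus 16 when extra debug code is emitted).
uint64_t ExpectedReturnAddress(uint64_t start_call, int offset_bytes) {
  uint64_t ra_after_nal = start_call + 2 * kInstrSize;    // what nal yields
  return ra_after_nal + (offset_bytes - 2 * kInstrSize);  // the "offset - 8"
}

int main() {
  // With offset = 40, the stored PC is start_call + 40, which must equal
  // SizeOfCodeGeneratedSince(&start_call) at the point after the call.
  assert(ExpectedReturnAddress(0x1000, 40) == 0x1000 + 40);
}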
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index e8020d9e89..af0774f468 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -159,6 +159,7 @@ namespace compiler {
V(MipsI32x4MinU) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
+ V(MipsF32x4Sqrt) \
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4Add) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 4e6aef52f4..ba17ad2581 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -54,6 +54,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Div:
case kMipsF32x4Ne:
case kMipsF32x4Neg:
+ case kMipsF32x4Sqrt:
case kMipsF32x4RecipApprox:
case kMipsF32x4RecipSqrtApprox:
case kMipsF32x4ReplaceLane:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index bb47262c6c..7ee5c7c2c7 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -781,6 +780,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Ctz(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
@@ -2015,6 +2018,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
V(F32x4Abs, kMipsF32x4Abs) \
V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4Sqrt, kMipsF32x4Sqrt) \
V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
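
Wiring up F32x4Sqrt on MIPS touches four places: the kMipsF32x4Sqrt entry in the opcode list, its scheduler case, a code-generator case that emits fsqrt_w, and a single V(F32x4Sqrt, kMipsF32x4Sqrt) line in the selector's SIMD list, which an X-macro expands into a visitor that emits the opcode. A condensed sketch of that expansion with made-up names (the real list, opcode enum, and Emit signature are larger):

#include <cstdio>

enum ArchOpcode { kSketchF32x4Neg, kSketchF32x4Sqrt };

#define SIMD_UNOP_LIST(V)          \
  V(F32x4Neg, kSketchF32x4Neg)     \
  V(F32x4Sqrt, kSketchF32x4Sqrt)  // the newly added entry

struct SelectorSketch {
  void Emit(ArchOpcode opcode) { std::printf("emit %d\n", opcode); }

#define DECLARE_SIMD_VISITOR(Name, opcode) \
  void Visit##Name() { Emit(opcode); }
  SIMD_UNOP_LIST(DECLARE_SIMD_VISITOR)
#undef DECLARE_SIMD_VISITOR
};

int main() {
  SelectorSketch selector;
  selector.VisitF32x4Sqrt();  // generated entirely from the list entry
}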
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 5682bed71a..9cec463e87 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -278,42 +278,41 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(
- bool& predicate, // NOLINT(runtime/references)
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kNotEqual:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterThanOrEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessThanOrEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
- predicate = true;
+ *predicate = true;
break;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -758,12 +757,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- Label return_location;
- if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // from start_call to return address.
+ int offset = 48;
+#if V8_HOST_ARCH_MIPS64
+ if (__ emit_debug_code()) {
+ offset += 16;
+ }
+#endif
+ if (isWasmCapiFunction) {
// Put the return address in a stack slot.
- __ LoadAddress(kScratchReg, &return_location);
- __ sd(kScratchReg,
- MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(kScratchReg, ra);
+ __ bind(&start_call);
+ __ nal();
+ __ nop();
+ __ Daddu(ra, ra, offset - 8); // 8 = nop + nal
+ __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mov(ra, kScratchReg);
}
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
@@ -772,7 +784,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
- __ bind(&return_location);
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ }
+
RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1276,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroSingleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
@@ -1339,7 +1354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister right = i.InputOrZeroDoubleRegister(1);
bool predicate;
FPUCondition cc =
- FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
@@ -2233,6 +2248,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kMips64F32x4Sqrt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I32x4Neg: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -3151,7 +3171,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ BranchTrueF(tlabel);
} else {
@@ -3261,7 +3281,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMips64CmpS:
case kMips64CmpD: {
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (predicate) {
__ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
} else {
@@ -3470,7 +3490,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
if (kArchVariant != kMips64r6) {
__ li(result, Operand(1));
if (predicate) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index edc8924757..bcf3532b57 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -189,6 +189,7 @@ namespace compiler {
V(Mips64I32x4MinU) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 880b424c41..fe2d33d1db 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Div:
case kMips64F32x4Ne:
case kMips64F32x4Neg:
+ case kMips64F32x4Sqrt:
case kMips64F32x4RecipApprox:
case kMips64F32x4RecipSqrtApprox:
case kMips64F32x4ReplaceLane:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9c717ab1e9..dfc0ff5bad 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -823,6 +822,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitWord32Ctz(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
@@ -2678,6 +2681,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
V(F32x4Abs, kMips64F32x4Abs) \
V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4Sqrt, kMips64F32x4Sqrt) \
V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 5c69bc34a1..dde1804adb 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -263,9 +263,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1024,7 +1023,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+#if defined(_AIX)
+ // AIX/PPC64BE Linux uses a function descriptor
+  // and emits 2 extra Load instructions under CallCFunctionHelper.

+ constexpr int offset = 11 * kInstrSize;
+#else
constexpr int offset = 9 * kInstrSize;
+#endif
if (isWasmCapiFunction) {
__ mflr(r0);
__ bind(&start_call);
@@ -1043,9 +1048,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
// TODO(miladfar): In the above block, kScratchReg must be populated with
// the strictly-correct PC, which is the return address at this spot. The
- // offset is set to 36 (9 * kInstrSize) right now, which is counted from
- // where we are binding to the label and ends at this spot. If failed,
- // replace it with the correct offset suggested. More info on f5ab7d3.
+ // offset is set to 36 (9 * kInstrSize) on pLinux and 44 on AIX, which is
+ // counted from where we are binding to the label and ends at this spot.
+ // If failed, replace it with the correct offset suggested. More info on
+ // f5ab7d3.
if (isWasmCapiFunction)
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
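
The same fixed-offset validation exists on PPC, and the hunk above widens it for AIX, where calls go through a function descriptor and CallCFunctionHelper emits two extra loads, i.e. two more instructions between start_call and the return address. A tiny sketch of how the constant falls out, assuming 4-byte PPC instructions as the patch does:

#include <cstdio>

constexpr int kInstrSize = 4;
#if defined(_AIX)
constexpr int kCallSequenceInstructions = 11;  // two extra loads for the descriptor
#else
constexpr int kCallSequenceInstructions = 9;
#endif
constexpr int kOffsetToReturnAddress = kCallSequenceInstructions * kInstrSize;
static_assert(kOffsetToReturnAddress == 36 || kOffsetToReturnAddress == 44,
              "36 bytes on PPC Linux builds, 44 on AIX");

int main() {
  std::printf("start_call to return address: %d bytes\n", kOffsetToReturnAddress);
}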
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index ef8490a726..2ffd6495d7 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -926,6 +926,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes.
+ // Check if the input node is a Load and do a Load Reverse at once.
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -2283,6 +2289,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index 53349c9c2b..17e0b8ca75 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -92,7 +92,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ if (constraint.type_ != kImmediate) {
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -102,14 +102,12 @@ void RegisterAllocatorVerifier::VerifyTemp(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
}
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -149,8 +147,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->virtual_register();
constraint->virtual_register_ = constraint->value_;
- } else if (op->IsExplicit()) {
- constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
@@ -235,9 +231,6 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kFPRegister:
CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
return;
- case kExplicit:
- CHECK_WITH_MSG(op->IsExplicit(), caller_info_);
- return;
case kFixedRegister:
case kRegisterAndSlot:
CHECK_WITH_MSG(op->IsRegister(), caller_info_);
@@ -503,8 +496,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves() {
instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
- if (op_constraints[count].type_ == kImmediate ||
- op_constraints[count].type_ == kExplicit) {
+ if (op_constraints[count].type_ == kImmediate) {
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h
index 68e69c0d16..7110c2eb42 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h
@@ -188,7 +188,6 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kRegisterOrSlot,
kRegisterOrSlotFP,
kRegisterOrSlotOrConstant,
- kExplicit,
kSameAsFirst,
kRegisterAndSlot
};
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 21eef0485c..945554eb32 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
@@ -317,7 +317,6 @@ UsePositionHintType UsePosition::HintTypeForOperand(
switch (op.kind()) {
case InstructionOperand::CONSTANT:
case InstructionOperand::IMMEDIATE:
- case InstructionOperand::EXPLICIT:
return UsePositionHintType::kNone;
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
@@ -797,12 +796,13 @@ LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
return start_search->end();
}
-LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const {
+LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
UseInterval* start_search = FirstSearchIntervalForPosition(position);
while (start_search->start() < position) {
start_search = start_search->next();
}
- return start_search->start();
+ next_start_ = start_search->start();
+ return next_start_;
}
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
@@ -1940,8 +1940,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
InstructionOperand* input = second->InputAt(i);
- if (input->IsImmediate() || input->IsExplicit()) {
- continue; // Ignore immediates and explicitly reserved registers.
+ if (input->IsImmediate()) {
+ continue; // Ignore immediates.
}
UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
@@ -2323,8 +2323,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
for (size_t i = 0; i < instr->InputCount(); i++) {
InstructionOperand* input = instr->InputAt(i);
- if (input->IsImmediate() || input->IsExplicit()) {
- continue; // Ignore immediates and explicitly reserved registers.
+ if (input->IsImmediate()) {
+ continue; // Ignore immediates.
}
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -2504,10 +2504,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
predecessor_hint_preference |= kNotDeferredBlockPreference;
}
- // - Prefer hints from allocated (or explicit) operands.
+ // - Prefer hints from allocated operands.
//
- // Already-allocated or explicit operands are typically assigned using
- // the parallel moves on the last instruction. For example:
+ // Already-allocated operands are typically assigned using the parallel
+ // moves on the last instruction. For example:
//
// gap (v101 = [x0|R|w32]) (v100 = v101)
// ArchJmp
@@ -2515,7 +2515,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
// phi: v100 = v101 v102
//
// We have already found the END move, so look for a matching START move
- // from an allocated (or explicit) operand.
+ // from an allocated operand.
//
// Note that we cannot simply look up data()->live_ranges()[vreg] here
// because the live ranges are still being built when this function is
@@ -2527,7 +2527,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
for (MoveOperands* move : *moves) {
InstructionOperand& to = move->destination();
if (predecessor_hint->Equals(to)) {
- if (move->source().IsAllocated() || move->source().IsExplicit()) {
+ if (move->source().IsAllocated()) {
predecessor_hint_preference |= kMoveIsAllocatedPreference;
}
break;
@@ -3095,11 +3095,11 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
: RegisterAllocator(data, kind),
unhandled_live_ranges_(local_zone),
active_live_ranges_(local_zone),
- inactive_live_ranges_(local_zone),
+ inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone),
+ local_zone),
next_active_ranges_change_(LifetimePosition::Invalid()),
next_inactive_ranges_change_(LifetimePosition::Invalid()) {
active_live_ranges().reserve(8);
- inactive_live_ranges().reserve(8);
}
void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range,
@@ -3143,15 +3143,15 @@ void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) {
}
}
-void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
+void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
LifetimePosition position,
SpillMode spill_mode) {
for (auto it = active_live_ranges().begin();
it != active_live_ranges().end();) {
LiveRange* active_range = *it;
TopLevelLiveRange* toplevel = (*it)->TopLevel();
- auto found = to_be_live.find({toplevel, kUnassignedRegister});
- if (found == to_be_live.end()) {
+ auto found = to_be_live->find({toplevel, kUnassignedRegister});
+ if (found == to_be_live->end()) {
// Is not contained in {to_be_live}, spill it.
// Fixed registers are exempt from this. They might have been
// added from inactive at the block boundary but we know that
@@ -3207,7 +3207,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
} else {
// This range is contained in {to_be_live}, so we can keep it.
int expected_register = (*found).expected_register;
- to_be_live.erase(found);
+ to_be_live->erase(found);
if (expected_register == active_range->assigned_register()) {
        // Was live and in correct register, simply pass through.
TRACE("Keeping %d:%d in %s\n", toplevel->vreg(),
@@ -3238,31 +3238,22 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
  // give reloading registers precedence. That way we would compute the
// intersection for the entire future.
LifetimePosition new_end = range->End();
- for (const auto inactive : inactive_live_ranges()) {
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (inactive->assigned_register() != reg) continue;
- } else {
- bool conflict = inactive->assigned_register() == reg;
- if (!conflict) {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(range->representation(), reg,
- inactive->representation(),
- &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases-- && !conflict) {
- int aliased_reg = alias_base_index + aliases;
- if (aliased_reg == reg) {
- conflict = true;
- }
- }
- }
- if (!conflict) continue;
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
+ continue;
}
- for (auto interval = inactive->first_interval(); interval != nullptr;
- interval = interval->next()) {
- if (interval->start() > new_end) break;
- if (interval->end() <= range->Start()) continue;
- if (new_end > interval->start()) new_end = interval->start();
+ for (const auto cur_inactive : inactive_live_ranges(cur_reg)) {
+ if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
+ range->representation(), reg)) {
+ continue;
+ }
+ for (auto interval = cur_inactive->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ if (interval->start() > new_end) break;
+ if (interval->end() <= range->Start()) continue;
+ if (new_end > interval->start()) new_end = interval->start();
+ }
}
}
if (new_end != range->End()) {
@@ -3275,8 +3266,8 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range,
return range;
}
-void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position) {
+void LinearScanAllocator::ReloadLiveRanges(
+ RangeWithRegisterSet const& to_be_live, LifetimePosition position) {
// Assumption: All ranges in {to_be_live} are currently spilled and there are
// no conflicting registers in the active ranges.
// The former is ensured by SpillNotLiveRanges, the latter is by construction
@@ -3558,11 +3549,17 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
Min(updated->End(), next_active_ranges_change_);
});
}
- for (auto inactive : inactive_live_ranges()) {
- split_conflicting(range, inactive, [this](LiveRange* updated) {
- next_inactive_ranges_change_ =
- Min(updated->End(), next_inactive_ranges_change_);
- });
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ reg != range->assigned_register()) {
+ continue;
+ }
+ for (auto inactive : inactive_live_ranges(reg)) {
+ split_conflicting(range, inactive, [this](LiveRange* updated) {
+ next_inactive_ranges_change_ =
+ Min(updated->End(), next_inactive_ranges_change_);
+ });
+ }
}
};
if (mode() == GENERAL_REGISTERS) {
@@ -3600,12 +3597,14 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
} else {
// Remove all ranges.
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- if ((*it)->TopLevel()->IsDeferredFixed()) {
- it = inactive_live_ranges().erase(it);
- } else {
- ++it;
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ for (auto it = inactive_live_ranges(reg).begin();
+ it != inactive_live_ranges(reg).end();) {
+ if ((*it)->TopLevel()->IsDeferredFixed()) {
+ it = inactive_live_ranges(reg).erase(it);
+ } else {
+ ++it;
+ }
}
}
}
@@ -3636,7 +3635,9 @@ bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) {
void LinearScanAllocator::AllocateRegisters() {
DCHECK(unhandled_live_ranges().empty());
DCHECK(active_live_ranges().empty());
- DCHECK(inactive_live_ranges().empty());
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ DCHECK(inactive_live_ranges(reg).empty());
+ }
SplitAndSpillRangesDefinedByMemoryOperand();
data()->ResetSpillState();
@@ -3853,7 +3854,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
if (!no_change_required) {
- SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode);
+ SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
ReloadLiveRanges(to_be_live, next_block_boundary);
}
@@ -3941,9 +3942,10 @@ void LinearScanAllocator::AddToActive(LiveRange* range) {
void LinearScanAllocator::AddToInactive(LiveRange* range) {
TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
range->relative_id());
- inactive_live_ranges().push_back(range);
next_inactive_ranges_change_ = std::min(
next_inactive_ranges_change_, range->NextStartAfter(range->Start()));
+ DCHECK(range->HasRegisterAssigned());
+ inactive_live_ranges(range->assigned_register()).insert(range);
}
void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
@@ -3966,30 +3968,36 @@ ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
LiveRange* range = *it;
- inactive_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from active to inactive\n",
(range)->TopLevel()->vreg(), range->relative_id());
+ LifetimePosition next_active = range->NextStartAfter(position);
next_inactive_ranges_change_ =
- std::min(next_inactive_ranges_change_, range->NextStartAfter(position));
+ std::min(next_inactive_ranges_change_, next_active);
+ DCHECK(range->HasRegisterAssigned());
+ inactive_live_ranges(range->assigned_register()).insert(range);
return active_live_ranges().erase(it);
}
-ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled(
- ZoneVector<LiveRange*>::iterator it) {
+LinearScanAllocator::InactiveLiveRangeQueue::iterator
+LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) {
+ LiveRange* range = *it;
TRACE("Moving live range %d:%d from inactive to handled\n",
- (*it)->TopLevel()->vreg(), (*it)->relative_id());
- return inactive_live_ranges().erase(it);
+ range->TopLevel()->vreg(), range->relative_id());
+ int reg = range->assigned_register();
+ return inactive_live_ranges(reg).erase(it);
}
-ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive(
- ZoneVector<LiveRange*>::iterator it, LifetimePosition position) {
+LinearScanAllocator::InactiveLiveRangeQueue::iterator
+LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it,
+ LifetimePosition position) {
LiveRange* range = *it;
active_live_ranges().push_back(range);
TRACE("Moving live range %d:%d from inactive to active\n",
range->TopLevel()->vreg(), range->relative_id());
next_active_ranges_change_ =
std::min(next_active_ranges_change_, range->NextEndAfter(position));
- return inactive_live_ranges().erase(it);
+ int reg = range->assigned_register();
+ return inactive_live_ranges(reg).erase(it);
}
void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
@@ -4012,18 +4020,25 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) {
if (position >= next_inactive_ranges_change_) {
next_inactive_ranges_change_ = LifetimePosition::MaxPosition();
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- LiveRange* cur_inactive = *it;
- if (cur_inactive->End() <= position) {
- it = InactiveToHandled(it);
- } else if (cur_inactive->Covers(position)) {
- it = InactiveToActive(it, position);
- } else {
- next_inactive_ranges_change_ =
- std::min(next_inactive_ranges_change_,
- cur_inactive->NextStartAfter(position));
- ++it;
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ ZoneVector<LiveRange*> reorder(data()->allocation_zone());
+ for (auto it = inactive_live_ranges(reg).begin();
+ it != inactive_live_ranges(reg).end();) {
+ LiveRange* cur_inactive = *it;
+ if (cur_inactive->End() <= position) {
+ it = InactiveToHandled(it);
+ } else if (cur_inactive->Covers(position)) {
+ it = InactiveToActive(it, position);
+ } else {
+ next_inactive_ranges_change_ =
+ std::min(next_inactive_ranges_change_,
+ cur_inactive->NextStartAfter(position));
+ it = inactive_live_ranges(reg).erase(it);
+ reorder.push_back(cur_inactive);
+ }
+ }
+ for (LiveRange* range : reorder) {
+ inactive_live_ranges(reg).insert(range);
}
}
}
@@ -4094,31 +4109,34 @@ void LinearScanAllocator::FindFreeRegistersForRange(
}
}
- for (LiveRange* cur_inactive : inactive_live_ranges()) {
- DCHECK(cur_inactive->End() > range->Start());
- int cur_reg = cur_inactive->assigned_register();
- // No need to carry out intersections, when this register won't be
- // interesting to this range anyway.
- // TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
- positions[cur_reg] < range->Start()) {
- continue;
- }
-
- LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
- if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- positions[cur_reg] = Min(positions[cur_reg], next_intersection);
- TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
- Min(positions[cur_reg], next_intersection).value());
- } else {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(
- cur_inactive->representation(), cur_reg, rep, &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases--) {
- int aliased_reg = alias_base_index + aliases;
- positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
+ for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) {
+ for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
+ DCHECK_GT(cur_inactive->End(), range->Start());
+ CHECK_EQ(cur_inactive->assigned_register(), cur_reg);
+ // No need to carry out intersections, when this register won't be
+ // interesting to this range anyway.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ positions[cur_reg] <= cur_inactive->NextStart()) {
+ break;
+ }
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(range);
+ if (!next_intersection.IsValid()) continue;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ positions[cur_reg].value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_inactive->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ positions[aliased_reg] =
+ std::min(positions[aliased_reg], next_intersection);
+ }
}
}
}
@@ -4337,46 +4355,46 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
}
}
- for (LiveRange* range : inactive_live_ranges()) {
- DCHECK(range->End() > current->Start());
- int cur_reg = range->assigned_register();
- bool is_fixed = range->TopLevel()->IsFixed();
-
- // Don't perform costly intersections if they are guaranteed to not update
- // block_pos or use_pos.
- // TODO(mtrofin): extend to aliased ranges, too.
- if ((kSimpleFPAliasing || !check_fp_aliasing())) {
- if (is_fixed) {
- if (block_pos[cur_reg] < range->Start()) continue;
- } else {
- if (use_pos[cur_reg] < range->Start()) continue;
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
+ for (LiveRange* range : inactive_live_ranges(cur_reg)) {
+ DCHECK(range->End() > current->Start());
+ DCHECK_EQ(range->assigned_register(), cur_reg);
+ bool is_fixed = range->TopLevel()->IsFixed();
+
+ // Don't perform costly intersections if they are guaranteed to not update
+ // block_pos or use_pos.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+ DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
+ if (block_pos[cur_reg] <= range->NextStart()) break;
+ if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
}
- }
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
- if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (is_fixed) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- } else {
- int alias_base_index = -1;
- int aliases = data()->config()->GetAliases(
- range->representation(), cur_reg, rep, &alias_base_index);
- DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
- while (aliases--) {
- int aliased_reg = alias_base_index + aliases;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
- block_pos[aliased_reg] =
- Min(block_pos[aliased_reg], next_intersection);
- use_pos[aliased_reg] =
- Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
- use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed) {
+ block_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ } else {
+ use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ }
}
}
}
@@ -4490,40 +4508,38 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
it = ActiveToHandled(it);
}
- for (auto it = inactive_live_ranges().begin();
- it != inactive_live_ranges().end();) {
- LiveRange* range = *it;
- DCHECK(range->End() > current->Start());
- if (range->TopLevel()->IsFixed()) {
- ++it;
- continue;
- }
+ for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
if (kSimpleFPAliasing || !check_fp_aliasing()) {
- if (range->assigned_register() != reg) {
+ if (cur_reg != reg) continue;
+ }
+ for (auto it = inactive_live_ranges(cur_reg).begin();
+ it != inactive_live_ranges(cur_reg).end();) {
+ LiveRange* range = *it;
+ if (!kSimpleFPAliasing && check_fp_aliasing() &&
+ !data()->config()->AreAliases(current->representation(), reg,
+ range->representation(), cur_reg)) {
++it;
continue;
}
- } else {
- if (!data()->config()->AreAliases(current->representation(), reg,
- range->representation(),
- range->assigned_register())) {
+ DCHECK(range->End() > current->Start());
+ if (range->TopLevel()->IsFixed()) {
++it;
continue;
}
- }
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == nullptr) {
- SpillAfter(range, split_pos, spill_mode);
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == nullptr) {
+ SpillAfter(range, split_pos, spill_mode);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection, spill_mode);
+ }
+ it = InactiveToHandled(it);
} else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection, spill_mode);
+ ++it;
}
- it = InactiveToHandled(it);
- } else {
- ++it;
}
}
}
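
The largest behavioral change in this file is the bookkeeping for inactive live ranges: instead of one flat vector, the allocator now keeps one ordered set per register, keyed on each range's cached NextStart(), so the scans in FindFreeRegistersForRange and AllocateBlockedReg can break out as soon as the remaining ranges start too late to matter, and ForwardStateTo must erase and reinsert any range whose key changes. A simplified sketch of both ideas, with std:: containers and plain ints standing in for V8's Zone containers and LifetimePosition:

#include <algorithm>
#include <climits>
#include <set>
#include <vector>

struct RangeSketch {
  int next_start;           // cached LiveRange::NextStart()
  int free_from_candidate;  // stand-in for FirstIntersection(); INT_MAX = none
};

struct ByNextStart {
  bool operator()(const RangeSketch* a, const RangeSketch* b) const {
    return a->next_start < b->next_start;
  }
};
using InactiveQueue = std::multiset<RangeSketch*, ByNextStart>;

// How long register `reg` stays free for a candidate range. Because the queue
// is sorted by next_start, once free_until drops to or below the next range's
// start, later ranges cannot lower it further and the loop breaks early.
int FreeUntil(const std::vector<InactiveQueue>& inactive_per_reg, int reg) {
  int free_until = INT_MAX;
  for (const RangeSketch* r : inactive_per_reg[reg]) {
    if (free_until <= r->next_start) break;
    free_until = std::min(free_until, r->free_from_candidate);
  }
  return free_until;
}

// The ordering key is mutable, so updating it requires erasing the element
// first and reinserting it afterwards, mirroring the `reorder` vector in
// ForwardStateTo.
void AdvanceTo(InactiveQueue* inactive, int position) {
  std::vector<RangeSketch*> reorder;
  for (auto it = inactive->begin(); it != inactive->end();) {
    RangeSketch* r = *it;
    if (r->next_start < position) {
      it = inactive->erase(it);
      r->next_start = position;  // stand-in for NextStartAfter(position)
      reorder.push_back(r);
    } else {
      ++it;
    }
  }
  for (RangeSketch* r : reorder) inactive->insert(r);
}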
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index bc7b09d147..17d664e507 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -335,7 +335,11 @@ class RegisterAllocationData final : public ZoneObject {
return result;
}
- void ResetSpillState() { spill_state_.clear(); }
+ void ResetSpillState() {
+ for (auto& state : spill_state_) {
+ state.clear();
+ }
+ }
TickCounter* tick_counter() { return tick_counter_; }
@@ -626,9 +630,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
bool ShouldBeAllocatedBefore(const LiveRange* other) const;
bool CanCover(LifetimePosition position) const;
bool Covers(LifetimePosition position) const;
- LifetimePosition NextStartAfter(LifetimePosition position) const;
+ LifetimePosition NextStartAfter(LifetimePosition position);
LifetimePosition NextEndAfter(LifetimePosition position) const;
LifetimePosition FirstIntersection(LiveRange* other) const;
+ LifetimePosition NextStart() const { return next_start_; }
void VerifyChildStructure() const {
VerifyIntervals();
@@ -689,6 +694,8 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
LiveRangeBundle* bundle_ = nullptr;
+ // Next interval start, relative to the current linear scan position.
+ LifetimePosition next_start_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -1298,29 +1305,39 @@ class LinearScanAllocator final : public RegisterAllocator {
LifetimePosition begin_pos,
LiveRange* end_range);
void MaybeUndoPreviousSplit(LiveRange* range);
- void SpillNotLiveRanges(
- RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
- LifetimePosition position, SpillMode spill_mode);
+ void SpillNotLiveRanges(RangeWithRegisterSet* to_be_live,
+ LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
- void ReloadLiveRanges(
- RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
- LifetimePosition position);
+ void ReloadLiveRanges(RangeWithRegisterSet const& to_be_live,
+ LifetimePosition position);
void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
const InstructionBlock* block);
bool HasNonDeferredPredecessor(InstructionBlock* block);
- struct LiveRangeOrdering {
+ struct UnhandledLiveRangeOrdering {
bool operator()(const LiveRange* a, const LiveRange* b) const {
return a->ShouldBeAllocatedBefore(b);
}
};
- using LiveRangeQueue = ZoneMultiset<LiveRange*, LiveRangeOrdering>;
- LiveRangeQueue& unhandled_live_ranges() { return unhandled_live_ranges_; }
+
+ struct InactiveLiveRangeOrdering {
+ bool operator()(const LiveRange* a, const LiveRange* b) const {
+ return a->NextStart() < b->NextStart();
+ }
+ };
+
+ using UnhandledLiveRangeQueue =
+ ZoneMultiset<LiveRange*, UnhandledLiveRangeOrdering>;
+ using InactiveLiveRangeQueue =
+ ZoneMultiset<LiveRange*, InactiveLiveRangeOrdering>;
+ UnhandledLiveRangeQueue& unhandled_live_ranges() {
+ return unhandled_live_ranges_;
+ }
ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
- ZoneVector<LiveRange*>& inactive_live_ranges() {
- return inactive_live_ranges_;
+ InactiveLiveRangeQueue& inactive_live_ranges(int reg) {
+ return inactive_live_ranges_[reg];
}
void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
@@ -1333,10 +1350,10 @@ class LinearScanAllocator final : public RegisterAllocator {
ZoneVector<LiveRange*>::iterator it);
ZoneVector<LiveRange*>::iterator ActiveToInactive(
ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
- ZoneVector<LiveRange*>::iterator InactiveToHandled(
- ZoneVector<LiveRange*>::iterator it);
- ZoneVector<LiveRange*>::iterator InactiveToActive(
- ZoneVector<LiveRange*>::iterator it, LifetimePosition position);
+ InactiveLiveRangeQueue::iterator InactiveToHandled(
+ InactiveLiveRangeQueue::iterator it);
+ InactiveLiveRangeQueue::iterator InactiveToActive(
+ InactiveLiveRangeQueue::iterator it, LifetimePosition position);
void ForwardStateTo(LifetimePosition position);
@@ -1386,9 +1403,9 @@ class LinearScanAllocator final : public RegisterAllocator {
void PrintRangeOverview(std::ostream& os);
- LiveRangeQueue unhandled_live_ranges_;
+ UnhandledLiveRangeQueue unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
- ZoneVector<LiveRange*> inactive_live_ranges_;
+ ZoneVector<InactiveLiveRangeQueue> inactive_live_ranges_;
// Approximate at what position the set of ranges will change next.
// Used to avoid scanning for updates even if none are present.
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 4c2d862fc4..d0f97eca57 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1246,9 +1246,8 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 7f3277fc68..7b002fe6d3 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/adapters.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -436,68 +435,64 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
#endif
template <class CanCombineWithLoad>
-void GenerateRightOperands(
- InstructionSelector* selector, Node* node, Node* right,
- InstructionCode& opcode, // NOLINT(runtime/references)
- OperandModes& operand_mode, // NOLINT(runtime/references)
- InstructionOperand* inputs,
- size_t& input_count, // NOLINT(runtime/references)
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(InstructionSelector* selector, Node* node,
+ Node* right, InstructionCode* opcode,
+ OperandModes* operand_mode,
+ InstructionOperand* inputs, size_t* input_count,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
- if ((operand_mode & OperandMode::kAllowImmediate) &&
- g.CanBeImmediate(right, operand_mode)) {
- inputs[input_count++] = g.UseImmediate(right);
+ if ((*operand_mode & OperandMode::kAllowImmediate) &&
+ g.CanBeImmediate(right, *operand_mode)) {
+ inputs[(*input_count)++] = g.UseImmediate(right);
// Can only be RI or RRI
- operand_mode &= OperandMode::kAllowImmediate;
- } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+ *operand_mode &= OperandMode::kAllowImmediate;
+ } else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
canCombineWithLoad(SelectLoadOpcode(right))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- right, inputs, &input_count, OpcodeImmMode(opcode));
- opcode |= AddressingModeField::encode(mode);
- operand_mode &= ~OperandMode::kAllowImmediate;
- if (operand_mode & OperandMode::kAllowRM)
- operand_mode &= ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRRM));
- inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ right, inputs, input_count, OpcodeImmMode(*opcode));
+ *opcode |= AddressingModeField::encode(mode);
+ *operand_mode &= ~OperandMode::kAllowImmediate;
+ if (*operand_mode & OperandMode::kAllowRM)
+ *operand_mode &= ~OperandMode::kAllowDistinctOps;
+ } else if (*operand_mode & OperandMode::kAllowRM) {
+ DCHECK(!(*operand_mode & OperandMode::kAllowRRM));
+ inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
// Can not be Immediate
- operand_mode &=
+ *operand_mode &=
~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
- } else if (operand_mode & OperandMode::kAllowRRM) {
- DCHECK(!(operand_mode & OperandMode::kAllowRM));
- inputs[input_count++] = g.UseAnyExceptImmediate(right);
+ } else if (*operand_mode & OperandMode::kAllowRRM) {
+ DCHECK(!(*operand_mode & OperandMode::kAllowRM));
+ inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
// Can not be Immediate
- operand_mode &= ~OperandMode::kAllowImmediate;
+ *operand_mode &= ~OperandMode::kAllowImmediate;
} else {
UNREACHABLE();
}
} else {
- inputs[input_count++] = g.UseRegister(right);
+ inputs[(*input_count)++] = g.UseRegister(right);
// Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
+ *operand_mode &= OperandMode::kAllowRRR;
}
}
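
The hunks above and below convert GenerateRightOperands/GenerateBinOpOperands from non-const reference out-parameters (previously silenced with NOLINT(runtime/references)) to pointer out-parameters, so mutation is visible at every call site, as in the updated caller later in this file:

    GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
                          &input_count, canCombineWithLoad);
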
template <class CanCombineWithLoad>
-void GenerateBinOpOperands(
- InstructionSelector* selector, Node* node, Node* left, Node* right,
- InstructionCode& opcode, // NOLINT(runtime/references)
- OperandModes& operand_mode, // NOLINT(runtime/references)
- InstructionOperand* inputs,
- size_t& input_count, // NOLINT(runtime/references)
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
+ Node* left, Node* right, InstructionCode* opcode,
+ OperandModes* operand_mode,
+ InstructionOperand* inputs, size_t* input_count,
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
// left is always register
InstructionOperand const left_input = g.UseRegister(left);
- inputs[input_count++] = left_input;
+ inputs[(*input_count)++] = left_input;
if (left == right) {
- inputs[input_count++] = left_input;
+ inputs[(*input_count)++] = left_input;
// Can only be RR or RRR
- operand_mode &= OperandMode::kAllowRRR;
+ *operand_mode &= OperandMode::kAllowRRR;
} else {
GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
input_count, canCombineWithLoad);
@@ -575,8 +570,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
size_t output_count = 0;
Node* input = node->InputAt(0);
- GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
- input_count, canCombineWithLoad);
+ GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
+ &input_count, canCombineWithLoad);
bool input_is_word32 = ProduceWord32Result(input);
@@ -631,8 +626,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
- inputs, input_count, canCombineWithLoad);
+ GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode,
+ inputs, &input_count, canCombineWithLoad);
bool left_is_word32 = ProduceWord32Result(left);
@@ -1175,6 +1170,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ // TODO(miladfar): Implement the s390 selector for reversing SIMD bytes.
+ // Check if the input node is a Load and do a Load Reverse at once.
+ UNIMPLEMENTED();
+}
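
A minimal scalar sketch of the operation the TODO above refers to — by analogy with Word32ReverseBytes, the 16 bytes of the 128-bit value are reversed end to end (illustration only; the eventual s390 selector would fold a preceding Load into a single load-reverse, as the TODO suggests):

    #include <algorithm>
    #include <cstdint>

    inline void Simd128ReverseBytes(uint8_t bytes[16]) {
      // Reverse the byte order of the whole 128-bit value.
      std::reverse(bytes, bytes + 16);
    }
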
+
template <class Matcher, ArchOpcode neg_opcode>
static inline bool TryMatchNegFromSub(InstructionSelector* selector,
Node* node) {
@@ -2691,6 +2692,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index a4f82b153b..44da872f26 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -361,7 +361,6 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i, // NOLINT(runtime/references)
int pc) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
@@ -370,9 +369,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
-void EmitWordLoadPoisoningIfNeeded(
- CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i) { // NOLINT(runtime/references)
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ X64OperandConverter const& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1876,30 +1875,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kX64Movsxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1911,29 +1910,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movsxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1945,7 +1944,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
if (HasAddressingMode(instr)) {
__ movl(i.OutputRegister(), i.MemoryOperand());
@@ -1969,7 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
@@ -2021,7 +2020,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movq:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -2036,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2046,7 +2045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsd: {
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
@@ -2069,7 +2068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64Movdqu: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
@@ -2293,6 +2292,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(i.OutputDoubleRegister(), kScratchRegister);
break;
}
+ case kX64F64x2Sqrt: {
+ __ Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64F64x2Add: {
ASSEMBLE_SSE_BINOP(addpd);
break;
@@ -2350,22 +2353,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Lt: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Le: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Qfma: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movapd(tmp, i.InputSimd128Register(2));
+ __ mulpd(tmp, i.InputSimd128Register(1));
+ __ addpd(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
+ case kX64F64x2Qfms: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movapd(tmp, i.InputSimd128Register(2));
+ __ mulpd(tmp, i.InputSimd128Register(1));
+ __ subpd(i.OutputSimd128Register(), tmp);
+ }
break;
}
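
Per-lane semantics of the two new opcodes above, as a scalar sketch: with FMA3, vfmadd231pd / vfnmadd231pd compute dst = dst ± a*b in a single rounding step, while the SSE fallback (mulpd followed by addpd/subpd) performs the same accumulation with two roundings.

    #include <cmath>

    // acc is the destination lane (input 0); a and b are inputs 1 and 2.
    inline double QfmaLane(double acc, double a, double b) {
      return std::fma(a, b, acc);   // acc + a*b, single rounding
    }
    inline double QfmsLane(double acc, double a, double b) {
      return std::fma(-a, b, acc);  // acc - a*b, single rounding
    }
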
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
@@ -2445,6 +2474,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F32x4Sqrt: {
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64F32x4RecipApprox: {
__ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2538,6 +2571,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64F32x4Qfma: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movaps(tmp, i.InputSimd128Register(2));
+ __ mulps(tmp, i.InputSimd128Register(1));
+ __ addps(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
+ case kX64F32x4Qfms: {
+ if (CpuFeatures::IsSupported(FMA3)) {
+ CpuFeatureScope fma3_scope(tasm(), FMA3);
+ __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ } else {
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ movaps(tmp, i.InputSimd128Register(2));
+ __ mulps(tmp, i.InputSimd128Register(1));
+ __ subps(i.OutputSimd128Register(), tmp);
+ }
+ break;
+ }
case kX64I64x2Splat: {
CpuFeatureScope sse_scope(tasm(), SSE3);
XMMRegister dst = i.OutputSimd128Register();
@@ -2577,7 +2636,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 64.
+ __ andq(shift, Immediate(63));
+ __ movq(tmp, shift);
__ psllq(i.OutputSimd128Register(), tmp);
break;
}
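
The andq above implements the "shift count taken modulo lane width" rule (63 == 64 - 1 for i64x2 lanes) before the count is moved into an XMM register for psllq; the same pattern recurs below for the 32-, 16- and 8-bit lane shifts. Scalar equivalent, assuming modulo semantics for the shift count:

    #include <cstdint>

    inline uint64_t I64x2ShlLane(uint64_t lane, uint64_t shift) {
      return lane << (shift & 63);  // count reduced modulo 64
    }
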
@@ -2588,6 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.ToRegister(instr->TempAt(0));
+ // Modulo 64 not required as sarq_cl will mask cl to 6 bits.
// lower quadword
__ pextrq(tmp, src, 0x0);
@@ -2640,15 +2703,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (CpuFeatures::IsSupported(SSE4_2)) {
CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister src0 = i.InputSimd128Register(0);
+ XMMRegister src1 = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
- __ movaps(tmp, src);
- __ pcmpgtq(src, dst);
- __ blendvpd(tmp, dst); // implicit use of xmm0 as mask
- __ movaps(dst, tmp);
+ __ movaps(tmp, src1);
+ __ pcmpgtq(tmp, src0);
+ __ movaps(dst, src1);
+ __ blendvpd(dst, src0); // implicit use of xmm0 as mask
} else {
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
@@ -2689,11 +2752,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
__ movaps(tmp, src);
- __ pcmpgtq(src, dst);
- __ blendvpd(dst, tmp); // implicit use of xmm0 as mask
+ __ pcmpgtq(tmp, dst);
+ __ blendvpd(dst, src); // implicit use of xmm0 as mask
break;
}
case kX64I64x2Eq: {
@@ -2732,7 +2795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 64.
+ __ andq(shift, Immediate(63));
+ __ movq(tmp, shift);
__ psrlq(i.OutputSimd128Register(), tmp);
break;
}
@@ -2740,24 +2806,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister src_tmp = i.TempSimd128Register(0);
- XMMRegister dst_tmp = i.TempSimd128Register(1);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ XMMRegister src0 = i.InputSimd128Register(0);
+ XMMRegister src1 = i.InputSimd128Register(1);
+ XMMRegister tmp0 = i.TempSimd128Register(0);
+ XMMRegister tmp1 = i.TempSimd128Register(1);
+ DCHECK_EQ(tmp1, xmm0);
- __ movaps(src_tmp, src);
- __ movaps(dst_tmp, dst);
+ __ movaps(dst, src1);
+ __ movaps(tmp0, src0);
- __ pcmpeqd(src, src);
- __ psllq(src, 63);
+ __ pcmpeqd(tmp1, tmp1);
+ __ psllq(tmp1, 63);
- __ pxor(dst_tmp, src);
- __ pxor(src, src_tmp);
+ __ pxor(tmp0, tmp1);
+ __ pxor(tmp1, dst);
- __ pcmpgtq(src, dst_tmp);
- __ blendvpd(src_tmp, dst); // implicit use of xmm0 as mask
- __ movaps(dst, src_tmp);
+ __ pcmpgtq(tmp1, tmp0);
+ __ blendvpd(dst, src0); // implicit use of xmm0 as mask
break;
}
case kX64I64x2MaxU: {
@@ -2765,22 +2830,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- XMMRegister src_tmp = i.TempSimd128Register(0);
- XMMRegister dst_tmp = i.TempSimd128Register(1);
+ XMMRegister dst_tmp = i.TempSimd128Register(0);
+ XMMRegister tmp = i.TempSimd128Register(1);
DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(src, xmm0);
+ DCHECK_EQ(tmp, xmm0);
- __ movaps(src_tmp, src);
__ movaps(dst_tmp, dst);
- __ pcmpeqd(src, src);
- __ psllq(src, 63);
+ __ pcmpeqd(tmp, tmp);
+ __ psllq(tmp, 63);
- __ pxor(dst_tmp, src);
- __ pxor(src, src_tmp);
+ __ pxor(dst_tmp, tmp);
+ __ pxor(tmp, src);
- __ pcmpgtq(src, dst_tmp);
- __ blendvpd(dst, src_tmp); // implicit use of xmm0 as mask
+ __ pcmpgtq(tmp, dst_tmp);
+ __ blendvpd(dst, src); // implicit use of xmm0 as mask
break;
}
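
SSE4.2 only provides a signed 64-bit compare (pcmpgtq), so the MinU/MaxU sequences above first materialize 0x8000000000000000 per lane (pcmpeqd + psllq 63) and xor it into both operands, mapping the unsigned ordering onto the signed one before blendvpd (implicitly keyed on xmm0) selects the result lanes. The underlying identity, as a scalar sketch:

    #include <cstdint>

    inline bool UnsignedGreater64(uint64_t a, uint64_t b) {
      const uint64_t kSignBit = uint64_t{1} << 63;
      return static_cast<int64_t>(a ^ kSignBit) > static_cast<int64_t>(b ^ kSignBit);
    }
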
case kX64I64x2GtU: {
@@ -2820,11 +2884,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ movd(dst, i.InputRegister(0));
+ __ Movd(dst, i.InputRegister(0));
} else {
- __ movd(dst, i.InputOperand(0));
+ __ Movd(dst, i.InputOperand(0));
}
- __ pshufd(dst, dst, 0x0);
+ __ Pshufd(dst, dst, 0x0);
break;
}
case kX64I32x4ExtractLane: {
@@ -2878,28 +2942,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psignd(dst, kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignd(dst, kScratchDoubleReg);
} else {
- __ pxor(dst, dst);
- __ psubd(dst, src);
+ __ Pxor(dst, dst);
+ __ Psubd(dst, src);
}
break;
}
case kX64I32x4Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ pslld(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Pslld(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4ShrS: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ psrad(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Psrad(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4Add: {
- __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4AddHoriz: {
@@ -2908,45 +2978,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Sub: {
- __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Mul: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MinS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Eq: {
- __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ pcmpeqd(tmp, tmp);
- __ pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqd(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4GtS: {
- __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4GeS: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
+ __ Pminsd(dst, src);
+ __ Pcmpeqd(dst, src);
break;
}
case kX64I32x4UConvertF32x4: {
@@ -2992,18 +3062,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
- __ psrld(i.OutputSimd128Register(), tmp);
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 32.
+ __ andq(shift, Immediate(31));
+ __ Movq(tmp, shift);
+ __ Psrld(i.OutputSimd128Register(), tmp);
break;
}
case kX64I32x4MinU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4GtU: {
@@ -3011,18 +3084,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- __ pmaxud(dst, src);
- __ pcmpeqd(dst, src);
- __ pcmpeqd(tmp, tmp);
- __ pxor(dst, tmp);
+ __ Pmaxud(dst, src);
+ __ Pcmpeqd(dst, src);
+ __ Pcmpeqd(tmp, tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I32x4GeU: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminud(dst, src);
- __ pcmpeqd(dst, src);
+ __ Pminud(dst, src);
+ __ Pcmpeqd(dst, src);
break;
}
case kX64S128Zero: {
@@ -3044,17 +3117,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8ExtractLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
- __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsxwl(dst, dst);
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I16x8ReplaceLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
- __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
+ __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
} else {
- __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
}
break;
}
@@ -3085,13 +3157,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Shl: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psllw(i.OutputSimd128Register(), tmp);
break;
}
case kX64I16x8ShrS: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psraw(i.OutputSimd128Register(), tmp);
break;
}
@@ -3173,7 +3251,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ShrU: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ movq(tmp, i.InputRegister(1));
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 16.
+ __ andq(shift, Immediate(15));
+ __ movq(tmp, shift);
__ psrlw(i.OutputSimd128Register(), tmp);
break;
}
@@ -3230,28 +3311,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ movd(dst, i.InputRegister(0));
+ __ Movd(dst, i.InputRegister(0));
} else {
- __ movd(dst, i.InputOperand(0));
+ __ Movd(dst, i.InputOperand(0));
}
- __ xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ pshufb(dst, kScratchDoubleReg);
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
break;
}
case kX64I8x16ExtractLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
- __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
- __ movsxbl(dst, dst);
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I8x16ReplaceLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
- __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
+ __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
} else {
- __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
}
break;
}
@@ -3279,15 +3359,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Temp registers for shift mask and additional moves to XMM registers.
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
+ Register shift = i.InputRegister(1);
// Mask off the unwanted bits before word-shifting.
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(shift, Immediate(7));
+ __ movq(tmp, shift);
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
__ packuswb(kScratchDoubleReg, kScratchDoubleReg);
__ pand(dst, kScratchDoubleReg);
- __ movq(tmp_simd, i.InputRegister(1));
+ __ movq(tmp_simd, shift);
__ psllw(dst, tmp_simd);
break;
}
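
x64 has no per-byte shift, so the sequence above shifts 16-bit words with psllw and first clears the top `shift` bits of every byte (mask built via psrlw + packuswb) so that nothing leaks into the neighbouring byte. Per-lane sketch, with the shift already reduced modulo 8:

    #include <cstdint>

    inline uint8_t I8x16ShlLane(uint8_t lane, unsigned shift) {
      // The mask is redundant for a true 8-bit shift; it only matters because
      // the hardware shift operates on 16-bit words.
      uint8_t keep = static_cast<uint8_t>(0xFFu >> shift);
      return static_cast<uint8_t>((lane & keep) << shift);
    }
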
@@ -3302,6 +3385,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpcklbw(dst, dst);
// Prepare shift value
__ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psraw(kScratchDoubleReg, tmp_simd);
@@ -3414,6 +3499,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ punpcklbw(dst, dst);
// Prepare shift value
__ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
__ addq(tmp, Immediate(8));
__ movq(tmp_simd, tmp);
__ psrlw(kScratchDoubleReg, tmp_simd);
@@ -3422,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16AddSaturateU: {
- __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16SubSaturateU: {
@@ -3487,10 +3574,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64S128Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
- __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ andps(dst, kScratchDoubleReg);
- __ xorps(dst, i.InputSimd128Register(2));
+ __ Movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ Xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ Andps(dst, kScratchDoubleReg);
+ __ Xorps(dst, i.InputSimd128Register(2));
+ break;
+ }
+ case kX64S8x16Swizzle: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister mask = i.TempSimd128Register(0);
+
+ // Out-of-range indices should return 0. Adding 112 (unsigned saturating)
+ // sends any index > 15 to >= 128 (top bit set), so pshufb zeroes that lane.
+ __ Move(mask, static_cast<uint32_t>(0x70707070));
+ __ Pshufd(mask, mask, 0x0);
+ __ Paddusb(mask, i.InputSimd128Register(1));
+ __ Pshufb(dst, mask);
break;
}
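
The constant 0x70707070 splatted above exploits paddusb's unsigned saturation: any index greater than 15 ends up with its top bit set, and pshufb zeroes exactly those lanes while in-range indices keep their low nibble. Per-lane sketch:

    #include <cstdint>

    inline uint8_t S8x16SwizzleLane(const uint8_t src[16], uint8_t index) {
      unsigned sum = index + 0x70u;
      uint8_t sel = sum > 0xFF ? 0xFF : static_cast<uint8_t>(sum);  // paddusb
      return (sel & 0x80) ? 0 : src[sel & 0x0F];                    // pshufb
    }
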
case kX64S8x16Shuffle: {
@@ -3507,10 +3608,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SetupShuffleMaskOnStack(tasm(), mask);
- __ pshufb(dst, Operand(rsp, 0));
+ __ Pshufb(dst, Operand(rsp, 0));
} else { // two input operands
DCHECK_EQ(6, instr->InputCount());
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 0);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 0);
uint32_t mask[4] = {};
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
@@ -3520,13 +3621,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
SetupShuffleMaskOnStack(tasm(), mask);
- __ pshufb(kScratchDoubleReg, Operand(rsp, 0));
+ __ Pshufb(kScratchDoubleReg, Operand(rsp, 0));
uint32_t mask1[4] = {};
if (instr->InputAt(1)->IsSimd128Register()) {
XMMRegister src1 = i.InputSimd128Register(1);
if (src1 != dst) __ movups(dst, src1);
} else {
- __ movups(dst, i.InputOperand(1));
+ __ Movups(dst, i.InputOperand(1));
}
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
@@ -3536,8 +3637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
SetupShuffleMaskOnStack(tasm(), mask1);
- __ pshufb(dst, Operand(rsp, 0));
- __ por(dst, kScratchDoubleReg);
+ __ Pshufb(dst, Operand(rsp, 0));
+ __ Por(dst, kScratchDoubleReg);
}
__ movq(rsp, tmp);
break;
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 8a0a45a916..e390c6922c 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -160,6 +160,7 @@ namespace compiler {
V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
V(X64F64x2Add) \
V(X64F64x2Sub) \
V(X64F64x2Mul) \
@@ -170,6 +171,8 @@ namespace compiler {
V(X64F64x2Ne) \
V(X64F64x2Lt) \
V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -177,6 +180,7 @@ namespace compiler {
V(X64F32x4UConvertI32x4) \
V(X64F32x4Abs) \
V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
@@ -190,6 +194,8 @@ namespace compiler {
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2ReplaceLane) \
@@ -300,6 +306,7 @@ namespace compiler {
V(X64S128Or) \
V(X64S128Xor) \
V(X64S128Select) \
+ V(X64S8x16Swizzle) \
V(X64S8x16Shuffle) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index e9fa450c38..28a935fd91 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2ReplaceLane:
case kX64F64x2Abs:
case kX64F64x2Neg:
+ case kX64F64x2Sqrt:
case kX64F64x2Add:
case kX64F64x2Sub:
case kX64F64x2Mul:
@@ -139,6 +140,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Ne:
case kX64F64x2Lt:
case kX64F64x2Le:
+ case kX64F64x2Qfma:
+ case kX64F64x2Qfms:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -148,6 +151,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4RecipSqrtApprox:
case kX64F32x4Abs:
case kX64F32x4Neg:
+ case kX64F32x4Sqrt:
case kX64F32x4Add:
case kX64F32x4AddHoriz:
case kX64F32x4Sub:
@@ -159,6 +163,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Ne:
case kX64F32x4Lt:
case kX64F32x4Le:
+ case kX64F32x4Qfma:
+ case kX64F32x4Qfms:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
case kX64I64x2ReplaceLane:
@@ -275,6 +281,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
case kX64S1x8AllTrue:
+ case kX64S8x16Swizzle:
case kX64S8x16Shuffle:
case kX64S32x4Swizzle:
case kX64S32x4Shuffle:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 5379074bac..f5d05fdd85 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -4,7 +4,7 @@
#include <algorithm>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
@@ -250,9 +250,21 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
#else
UNREACHABLE();
#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kX64MovqDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kX64MovqDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kX64MovqDecompressAnyTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
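
With V8_COMPRESS_POINTERS a tagged field holds a 32-bit value relative to the heap base, so tagged loads need the dedicated decompress opcodes above instead of a plain kX64Movq. Roughly what a decompressing pointer load amounts to (a sketch; the exact handling of Smis and the choice of base register are configuration details not shown in this diff):

    #include <cstdint>

    inline uint64_t DecompressTaggedPointer(uint64_t heap_base, uint32_t compressed) {
      return heap_base + compressed;  // zero-extend the 32-bit field and rebase it
    }
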
@@ -288,7 +300,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged:
+ return kX64MovqCompressTagged;
case MachineRepresentation::kWord64:
return kX64Movq;
case MachineRepresentation::kSimd128: // Fall through.
@@ -875,6 +888,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -1843,17 +1860,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
node->op()->HasProperty(Operator::kCommutative));
}
-// Shared routine for 64-bit word comparison operations.
-void VisitWord64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X64OperandGenerator g(selector);
+void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
if (selector->CanUseRootsRegister()) {
+ X64OperandGenerator g(selector);
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
if (m.right().HasValue() &&
roots_table.IsRootHandle(m.right().Value(), &root_index)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
@@ -1861,18 +1876,30 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.left().node()), cont);
- } else if (m.left().HasValue() &&
- roots_table.IsRootHandle(m.left().Value(), &root_index)) {
+ }
+ }
+ VisitWordCompare(selector, node, kX64Cmp, cont);
+}
+
+void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ if (COMPRESS_POINTERS_BOOL && selector->CanUseRootsRegister()) {
+ X64OperandGenerator g(selector);
+ const RootsTable& roots_table = selector->isolate()->roots_table();
+ RootIndex root_index;
+ CompressedHeapObjectBinopMatcher m(node);
+ if (m.right().HasValue() &&
+ roots_table.IsRootHandle(m.right().Value(), &root_index)) {
InstructionCode opcode =
- kX64Cmp | AddressingModeField::encode(kMode_Root);
+ kX64Cmp32 | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
- g.UseRegister(m.right().node()), cont);
+ g.UseRegister(m.left().node()), cont);
}
}
- VisitWordCompare(selector, node, kX64Cmp, cont);
+ VisitWordCompare(selector, node, kX64Cmp32, cont);
}
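
Both *EqualImpl helpers above fold an equality test against a known root object into a single memory compare addressed off the root register (kMode_Root) instead of materializing the constant; the 32-bit variant is only used when pointer compression makes a 32-bit compare sufficient. Conceptually:

    // Sketch only: compare against the roots-table slot rather than a constant.
    inline bool IsKnownRoot(const uintptr_t* roots_slot, uintptr_t value) {
      return *roots_slot == value;  // cmp [kRootRegister + offset], reg
    }
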
// Shared routine for comparison with zero.
@@ -2048,7 +2075,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kX64Cmp32, cont);
+ return VisitWord32EqualImpl(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(this, value, kX64Cmp32, cont);
@@ -2071,7 +2098,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, cont);
default:
@@ -2080,20 +2107,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
return VisitCompareZero(this, user, value, kX64Cmp, cont);
}
- return VisitWord64Compare(this, value, cont);
+ return VisitWord64EqualImpl(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(this, value, cont);
+ return VisitWordCompare(this, value, kX64Cmp, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat32Compare(this, value, cont);
@@ -2221,7 +2248,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
if (m.right().Is(0)) {
return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
- VisitWordCompare(this, node, kX64Cmp32, &cont);
+ VisitWord32EqualImpl(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
@@ -2246,7 +2273,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
-void InstructionSelector::VisitWord64Equal(Node* const node) {
+void InstructionSelector::VisitWord64Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
@@ -2256,7 +2283,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(this, value, &cont);
+ return VisitWordCompare(this, value, kX64Cmp, &cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, &cont);
default:
@@ -2264,7 +2291,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
}
}
}
- VisitWord64Compare(this, node, &cont);
+ VisitWord64EqualImpl(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
@@ -2287,24 +2314,24 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
- VisitWord64Compare(this, node, &cont);
+ VisitWordCompare(this, node, kX64Cmp, &cont);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
@@ -2685,9 +2712,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16GtU)
#define SIMD_UNOP_LIST(V) \
+ V(F64x2Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I64x2Neg) \
@@ -2872,6 +2901,27 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2)), arraysize(temps), temps); \
+ } \
+ }
+VISIT_SIMD_QFMOP(F64x2Qfma)
+VISIT_SIMD_QFMOP(F64x2Qfms)
+VISIT_SIMD_QFMOP(F32x4Qfma)
+VISIT_SIMD_QFMOP(F32x4Qfms)
+#undef VISIT_SIMD_QFMOP
+
void InstructionSelector::VisitI64x2ShrS(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -2893,10 +2943,10 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI64x2MinS(Node* node) {
X64OperandGenerator g(this);
if (this->IsSupported(SSE4_2)) {
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
- arraysize(temps), temps);
+ InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
+ Emit(kX64I64x2MinS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
} else {
InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(),
g.TempRegister()};
@@ -2908,27 +2958,27 @@ void InstructionSelector::VisitI64x2MinS(Node* node) {
void InstructionSelector::VisitI64x2MaxS(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
void InstructionSelector::VisitI64x2MinU(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
- Emit(kX64I64x2MinU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
- arraysize(temps), temps);
+ g.TempFpRegister(xmm0)};
+ Emit(kX64I64x2MinU, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitI64x2MaxU(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ g.TempFpRegister(xmm0)};
Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
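
The four I64x2 min/max selectors above stop forcing input 1 into xmm0 (the old g.UseFixed(..., xmm0)); xmm0 is instead reserved explicitly as a temp via g.TempFpRegister(xmm0), since blendvpd reads xmm0 as its implicit mask, and the inputs become g.UseUniqueRegister so the register allocator cannot place them in registers the emitted sequence clobbers.
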
@@ -3256,6 +3306,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64S8x16Swizzle, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ arraysize(temps), temps);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index b1051be571..17472a305d 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -24,7 +24,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/objects/template-objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -215,6 +215,9 @@ class BytecodeGraphBuilder {
FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct(
const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedGetIterator(
+ const Operator* op, Node* receiver, FeedbackSlot load_slot,
+ FeedbackSlot call_slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
const Operator* op, Node* receiver, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
@@ -945,7 +948,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
bytecode_iterator_(
- base::make_unique<OffHeapBytecodeArray>(bytecode_array())),
+ std::make_unique<OffHeapBytecodeArray>(bytecode_array())),
bytecode_analysis_(broker_->GetBytecodeAnalysis(
bytecode_array().object(), osr_offset,
flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
@@ -971,12 +974,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
if (FLAG_concurrent_inlining) {
// With concurrent inlining on, the source position address doesn't change
// because it's been copied from the heap.
- source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
Vector<const byte>(bytecode_array().source_positions_address(),
bytecode_array().source_positions_size()));
} else {
// Otherwise, we need to access the table through a handle.
- source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
handle(bytecode_array().object()->SourcePositionTableIfCollected(),
isolate()));
}
@@ -2087,12 +2090,13 @@ void BytecodeGraphBuilder::VisitCloneObject() {
void BytecodeGraphBuilder::VisitGetTemplateObject() {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- ObjectRef description(
+ FeedbackSource source =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
+ TemplateObjectDescriptionRef description(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
- JSArrayRef template_object =
- shared_info().GetTemplateObject(description, feedback_vector(), slot);
- environment()->BindAccumulator(jsgraph()->Constant(template_object));
+ Node* template_object = NewNode(javascript()->GetTemplateObject(
+ description.object(), shared_info().object(), source));
+ environment()->BindAccumulator(template_object);
}
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
@@ -3297,19 +3301,21 @@ void BytecodeGraphBuilder::VisitForInStep() {
void BytecodeGraphBuilder::VisitGetIterator() {
PrepareEagerCheckpoint();
- Node* object =
+ Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- FeedbackSource feedback =
+ FeedbackSource load_feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
- const Operator* op = javascript()->GetIterator(feedback);
+ FeedbackSource call_feedback =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+ const Operator* op = javascript()->GetIterator(load_feedback, call_feedback);
- JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
+ JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedGetIterator(
+ op, receiver, load_feedback.slot, call_feedback.slot);
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(op, object);
- environment()->BindAccumulator(node, Environment::kAttachFrameState);
+ Node* iterator = NewNode(op, receiver);
+ environment()->BindAccumulator(iterator, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitSuspendGenerator() {
@@ -3776,6 +3782,20 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
}
JSTypeHintLowering::LoweringResult
+BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
+ Node* receiver,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ JSTypeHintLowering::LoweringResult early_reduction =
+ type_hint_lowering().ReduceGetIteratorOperation(
+ op, receiver, effect, control, load_slot, call_slot);
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction;
+}
+
+JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
Node* receiver,
FeedbackSlot slot) {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 428ba058a7..4c576b771a 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -27,7 +27,7 @@ namespace {
// == x64 ====================================================================
// ===========================================================================
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// == x64 windows ============================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS rcx, rdx, r8, r9
@@ -39,12 +39,12 @@ namespace {
(1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \
(1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \
(1 << xmm15.code())
-#else
+#else // V8_TARGET_OS_WIN
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
-#endif
+#endif // V8_TARGET_OS_WIN
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4f18011463..5b89e1b663 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -29,6 +29,7 @@ namespace internal {
constexpr MachineType MachineTypeOf<Smi>::value;
constexpr MachineType MachineTypeOf<Object>::value;
+constexpr MachineType MachineTypeOf<MaybeObject>::value;
namespace compiler {
@@ -1349,8 +1350,8 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, Node* target,
- SloppyTNode<Object> context,
+ size_t result_size, TNode<Object> target,
+ TNode<Object> context,
std::initializer_list<Node*> args) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -1369,7 +1370,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
inputs.data());
}
-Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
+void CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args) {
constexpr size_t kMaxNumArgs = 6;
@@ -1389,33 +1390,33 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
for (auto arg : args) inputs.Add(arg);
inputs.Add(context);
- return raw_assembler()->TailCallN(call_descriptor, inputs.size(),
- inputs.data());
+ raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
template <class... TArgs>
-Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+void CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
+ TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
- return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallBytecodeDispatch() for argument counts used by
// CSA-generated code
-template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
- Node*, Node*);
-
-TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code,
- TNode<Context> context,
- TNode<JSFunction> function,
- TNode<Object> new_target,
- TNode<Int32T> arg_count) {
+template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
+ TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>,
+ TNode<ExternalReference>);
+
+void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ TNode<JSFunction> function,
+ TNode<Object> new_target,
+ TNode<Int32T> arg_count) {
JSTrampolineDescriptor descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(),
@@ -1423,8 +1424,7 @@ TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code,
Node* nodes[] = {code, function, new_target, arg_count, context};
CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
- return UncheckedCast<Object>(
- raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
@@ -1914,7 +1914,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
compatibility_label_(label),
exception_(exception) {
if (has_handler_) {
- label_ = base::make_unique<CodeAssemblerExceptionHandlerLabel>(
+ label_ = std::make_unique<CodeAssemblerExceptionHandlerLabel>(
assembler, CodeAssemblerLabel::kDeferred);
assembler_->state()->PushExceptionHandler(label_.get());
}
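
Note on the hunks above: the tail-call helpers now return void, since a tail call never produces a value in the caller, and the explicit instantiation of TailCallBytecodeDispatch is retyped with TNode<> parameters. The following is a minimal, standalone sketch of the C++ pattern involved (explicit instantiation of a variadic member template in a .cc file so other translation units can link against the typed variant); the Assembler class and the tag types are illustrative stand-ins, not V8's API.

#include <iostream>

template <class T>
struct TNode {};
struct Object {};
struct IntPtrT {};

struct Assembler {
  template <class... TArgs>
  void TailCallBytecodeDispatch(void* target, TArgs... args) {
    // The real helper emits a tail call; here we only report the arity.
    (void)target;
    std::cout << "dispatch with " << sizeof...(args) << " args\n";
  }
};

// Explicit instantiation for one argument list, mirroring the
// `template V8_EXPORT_PRIVATE void ...` line in the hunk above.
template void Assembler::TailCallBytecodeDispatch(void*, TNode<Object>,
                                                  TNode<IntPtrT>);

int main() {
  Assembler a;
  a.TailCallBytecodeDispatch(nullptr, TNode<Object>{}, TNode<IntPtrT>{});
  return 0;
}
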
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index c9adb1601d..036b00b14d 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
+#include "src/codegen/tnode.h"
#include "src/heap/heap.h"
#include "src/objects/arguments.h"
#include "src/objects/data-handler.h"
@@ -79,210 +80,6 @@ TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
template <typename T>
class Signature;
-struct UntaggedT {};
-
-struct IntegralT : UntaggedT {};
-
-struct WordT : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
-};
-
-struct RawPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::Pointer();
-};
-
-template <class To>
-struct RawPtr : RawPtrT {};
-
-struct Word32T : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kWord32;
-};
-struct Int32T : Word32T {
- static constexpr MachineType kMachineType = MachineType::Int32();
-};
-struct Uint32T : Word32T {
- static constexpr MachineType kMachineType = MachineType::Uint32();
-};
-struct Int16T : Int32T {
- static constexpr MachineType kMachineType = MachineType::Int16();
-};
-struct Uint16T : Uint32T, Int32T {
- static constexpr MachineType kMachineType = MachineType::Uint16();
-};
-struct Int8T : Int16T {
- static constexpr MachineType kMachineType = MachineType::Int8();
-};
-struct Uint8T : Uint16T, Int16T {
- static constexpr MachineType kMachineType = MachineType::Uint8();
-};
-
-struct Word64T : IntegralT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kWord64;
-};
-struct Int64T : Word64T {
- static constexpr MachineType kMachineType = MachineType::Int64();
-};
-struct Uint64T : Word64T {
- static constexpr MachineType kMachineType = MachineType::Uint64();
-};
-
-struct IntPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::IntPtr();
-};
-struct UintPtrT : WordT {
- static constexpr MachineType kMachineType = MachineType::UintPtr();
-};
-
-struct Float32T : UntaggedT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kFloat32;
- static constexpr MachineType kMachineType = MachineType::Float32();
-};
-
-struct Float64T : UntaggedT {
- static const MachineRepresentation kMachineRepresentation =
- MachineRepresentation::kFloat64;
- static constexpr MachineType kMachineType = MachineType::Float64();
-};
-
-#ifdef V8_COMPRESS_POINTERS
-using TaggedT = Int32T;
-#else
-using TaggedT = IntPtrT;
-#endif
-
-// Result of a comparison operation.
-struct BoolT : Word32T {};
-
-// Value type of a Turbofan node with two results.
-template <class T1, class T2>
-struct PairT {};
-
-inline constexpr MachineType CommonMachineType(MachineType type1,
- MachineType type2) {
- return (type1 == type2) ? type1
- : ((type1.IsTagged() && type2.IsTagged())
- ? MachineType::AnyTagged()
- : MachineType::None());
-}
-
-template <class Type, class Enable = void>
-struct MachineTypeOf {
- static constexpr MachineType value = Type::kMachineType;
-};
-
-template <class Type, class Enable>
-constexpr MachineType MachineTypeOf<Type, Enable>::value;
-
-template <>
-struct MachineTypeOf<Object> {
- static constexpr MachineType value = MachineType::AnyTagged();
-};
-template <>
-struct MachineTypeOf<MaybeObject> {
- static constexpr MachineType value = MachineType::AnyTagged();
-};
-template <>
-struct MachineTypeOf<Smi> {
- static constexpr MachineType value = MachineType::TaggedSigned();
-};
-template <class HeapObjectSubtype>
-struct MachineTypeOf<HeapObjectSubtype,
- typename std::enable_if<std::is_base_of<
- HeapObject, HeapObjectSubtype>::value>::type> {
- static constexpr MachineType value = MachineType::TaggedPointer();
-};
-
-template <class HeapObjectSubtype>
-constexpr MachineType MachineTypeOf<
- HeapObjectSubtype, typename std::enable_if<std::is_base_of<
- HeapObject, HeapObjectSubtype>::value>::type>::value;
-
-template <class Type, class Enable = void>
-struct MachineRepresentationOf {
- static const MachineRepresentation value = Type::kMachineRepresentation;
-};
-template <class T>
-struct MachineRepresentationOf<
- T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
- static const MachineRepresentation value =
- MachineTypeOf<T>::value.representation();
-};
-template <class T>
-struct MachineRepresentationOf<
- T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> {
- static const MachineRepresentation value =
- MachineTypeOf<T>::value.representation();
-};
-
-template <class T>
-struct is_valid_type_tag {
- static const bool value = std::is_base_of<Object, T>::value ||
- std::is_base_of<UntaggedT, T>::value ||
- std::is_base_of<MaybeObject, T>::value ||
- std::is_same<ExternalReference, T>::value;
- static const bool is_tagged = std::is_base_of<Object, T>::value ||
- std::is_base_of<MaybeObject, T>::value;
-};
-
-template <class T1, class T2>
-struct is_valid_type_tag<PairT<T1, T2>> {
- static const bool value =
- is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value;
- static const bool is_tagged = false;
-};
-
-template <class T1, class T2>
-struct UnionT;
-
-template <class T1, class T2>
-struct is_valid_type_tag<UnionT<T1, T2>> {
- static const bool is_tagged =
- is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged;
- static const bool value = is_tagged;
-};
-
-template <class T1, class T2>
-struct UnionT {
- static constexpr MachineType kMachineType =
- CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value);
- static const MachineRepresentation kMachineRepresentation =
- kMachineType.representation();
- static_assert(kMachineRepresentation != MachineRepresentation::kNone,
- "no common representation");
- static_assert(is_valid_type_tag<T1>::is_tagged &&
- is_valid_type_tag<T2>::is_tagged,
- "union types are only possible for tagged values");
-};
-
-using Number = UnionT<Smi, HeapNumber>;
-using Numeric = UnionT<Number, BigInt>;
-
-// A pointer to a builtin function, used by Torque's function pointers.
-using BuiltinPtr = Smi;
-
-class int31_t {
- public:
- int31_t() : value_(0) {}
- int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- }
- int31_t& operator=(int value) {
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- value_ = value;
- return *this;
- }
- int32_t value() const { return value_; }
- operator int32_t() const { return value_; }
-
- private:
- int32_t value_;
-};
-
#define ENUM_ELEMENT(Name) k##Name,
#define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name,
enum class ObjectType {
@@ -334,6 +131,7 @@ class Undetectable;
class UniqueName;
class WasmCapiFunctionData;
class WasmExceptionObject;
+class WasmExceptionPackage;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
@@ -396,143 +194,6 @@ using CodeAssemblerVariableList = ZoneVector<CodeAssemblerVariable*>;
using CodeAssemblerCallback = std::function<void()>;
-template <class T, class U>
-struct is_subtype {
- static const bool value = std::is_base_of<U, T>::value;
-};
-template <class T1, class T2, class U>
-struct is_subtype<UnionT<T1, T2>, U> {
- static const bool value =
- is_subtype<T1, U>::value && is_subtype<T2, U>::value;
-};
-template <class T, class U1, class U2>
-struct is_subtype<T, UnionT<U1, U2>> {
- static const bool value =
- is_subtype<T, U1>::value || is_subtype<T, U2>::value;
-};
-template <class T1, class T2, class U1, class U2>
-struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
- static const bool value =
- (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
- (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
-};
-
-template <class T, class U>
-struct types_have_common_values {
- static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
-};
-template <class U>
-struct types_have_common_values<BoolT, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Uint32T, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Int32T, U> {
- static const bool value = types_have_common_values<Word32T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Uint64T, U> {
- static const bool value = types_have_common_values<Word64T, U>::value;
-};
-template <class U>
-struct types_have_common_values<Int64T, U> {
- static const bool value = types_have_common_values<Word64T, U>::value;
-};
-template <class U>
-struct types_have_common_values<IntPtrT, U> {
- static const bool value = types_have_common_values<WordT, U>::value;
-};
-template <class U>
-struct types_have_common_values<UintPtrT, U> {
- static const bool value = types_have_common_values<WordT, U>::value;
-};
-template <class T1, class T2, class U>
-struct types_have_common_values<UnionT<T1, T2>, U> {
- static const bool value = types_have_common_values<T1, U>::value ||
- types_have_common_values<T2, U>::value;
-};
-
-template <class T, class U1, class U2>
-struct types_have_common_values<T, UnionT<U1, U2>> {
- static const bool value = types_have_common_values<T, U1>::value ||
- types_have_common_values<T, U2>::value;
-};
-template <class T1, class T2, class U1, class U2>
-struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
- static const bool value = types_have_common_values<T1, U1>::value ||
- types_have_common_values<T1, U2>::value ||
- types_have_common_values<T2, U1>::value ||
- types_have_common_values<T2, U2>::value;
-};
-
-template <class T>
-struct types_have_common_values<T, MaybeObject> {
- static const bool value = types_have_common_values<T, Object>::value;
-};
-
-template <class T>
-struct types_have_common_values<MaybeObject, T> {
- static const bool value = types_have_common_values<Object, T>::value;
-};
-
-// TNode<T> is an SSA value with the static type tag T, which is one of the
-// following:
-// - a subclass of internal::Object represents a tagged type
-// - a subclass of internal::UntaggedT represents an untagged type
-// - ExternalReference
-// - PairT<T1, T2> for an operation returning two values, with types T1
-// and T2
-// - UnionT<T1, T2> represents either a value of type T1 or of type T2.
-template <class T>
-class TNode {
- public:
- template <class U,
- typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
- TNode(const TNode<U>& other) : node_(other) {
- LazyTemplateChecks();
- }
- TNode() : TNode(nullptr) {}
-
- TNode operator=(TNode other) {
- DCHECK_NOT_NULL(other.node_);
- node_ = other.node_;
- return *this;
- }
-
- operator compiler::Node*() const { return node_; }
-
- static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
-
- protected:
- explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
-
- private:
- // These checks shouldn't be checked before TNode is actually used.
- void LazyTemplateChecks() {
- static_assert(is_valid_type_tag<T>::value, "invalid type tag");
- }
-
- compiler::Node* node_;
-};
-
-// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
-// Node*. It is intended for function arguments as long as some call sites
-// still use untyped Node* arguments.
-// TODO(tebbi): Delete this class once transition is finished.
-template <class T>
-class SloppyTNode : public TNode<T> {
- public:
- SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
- : TNode<T>(node) {}
- template <class U, typename std::enable_if<is_subtype<U, T>::value,
- int>::type = 0>
- SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
- : TNode<T>(other) {}
-};
-
template <class... Types>
class CodeAssemblerParameterizedLabel;
@@ -627,7 +288,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64ExtractLowWord32, Uint32T, Float64T) \
V(Float64ExtractHighWord32, Uint32T, Float64T) \
V(BitcastTaggedToWord, IntPtrT, Object) \
- V(BitcastTaggedSignedToWord, IntPtrT, Smi) \
+ V(BitcastTaggedToWordForTagAndSmiBits, IntPtrT, AnyTaggedT) \
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
@@ -641,6 +302,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeInt32ToInt64, Int64T, Int32T) \
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
+ V(ChangeTaggedToCompressed, TaggedT, AnyTaggedT) \
V(BitcastInt32ToFloat32, Float32T, Word32T) \
V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
@@ -1187,8 +849,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> RawPtrAdd(TNode<RawPtrT> left, TNode<IntPtrT> right) {
return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
}
- TNode<RawPtrT> RawPtrAdd(TNode<IntPtrT> left, TNode<RawPtrT> right) {
- return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right));
+ TNode<RawPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<IntPtrT> right) {
+ return ReinterpretCast<RawPtrT>(IntPtrSub(left, right));
+ }
+ TNode<IntPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<RawPtrT> right) {
+ return Signed(
+ IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
@@ -1243,7 +909,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class Dummy = void>
TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) {
static_assert(sizeof(Dummy) < 0,
- "Should use BitcastTaggedSignedToWord instead.");
+ "Should use BitcastTaggedToWordForTagAndSmiBits instead.");
}
// Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
@@ -1363,26 +1029,26 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
TArgs... args) {
TNode<Code> target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, args...);
+ TailCallStub(callable.descriptor(), target, context, args...);
}
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
SloppyTNode<Code> target, SloppyTNode<Object> context,
TArgs... args) {
- return TailCallStubImpl(descriptor, target, context, {args...});
+ TailCallStubImpl(descriptor, target, context, {args...});
}
template <class... TArgs>
- Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
- Node* target, TArgs... args);
+ void TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
+ TNode<RawPtrT> target, TArgs... args);
template <class... TArgs>
- Node* TailCallStubThenBytecodeDispatch(
+ void TailCallStubThenBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
TArgs... args) {
- return TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
- {args...});
+ TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
+ {args...});
}
// Tailcalls to the given code object with JSCall linkage. The JS arguments
@@ -1392,14 +1058,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Note that no arguments adaption is going on here - all the JavaScript
// arguments are left on the stack unmodified. Therefore, this tail call can
// only be used after arguments adaptation has been performed already.
- TNode<Object> TailCallJSCode(TNode<Code> code, TNode<Context> context,
- TNode<JSFunction> function,
- TNode<Object> new_target,
- TNode<Int32T> arg_count);
+ void TailCallJSCode(TNode<Code> code, TNode<Context> context,
+ TNode<JSFunction> function, TNode<Object> new_target,
+ TNode<Int32T> arg_count);
template <class... TArgs>
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, TArgs... args) {
+ TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
TNode<Int32T> arity = Int32Constant(argc);
return CallStub(callable, context, function, arity, receiver, args...);
@@ -1511,15 +1176,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Code> target, TNode<Object> context,
std::initializer_list<Node*> args);
- Node* TailCallStubThenBytecodeDispatchImpl(
+ void TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
Node* CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, Node* target,
- SloppyTNode<Object> context,
- std::initializer_list<Node*> args);
+ size_t result_size, TNode<Object> target,
+ TNode<Object> context, std::initializer_list<Node*> args);
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
@@ -1810,7 +1474,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler {
} // namespace compiler
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
+#if defined(V8_HOST_ARCH_32_BIT)
#define BINT_IS_SMI
using BInt = Smi;
#elif defined(V8_HOST_ARCH_64_BIT)
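
The large deleted block above (UntaggedT and friends, MachineTypeOf, UnionT, is_subtype, TNode, SloppyTNode) is not dropped but moved to src/codegen/tnode.h, which is now included at the top of the header. A minimal, self-contained re-creation of the core idea, using toy tag types instead of V8's object hierarchy, shows what the compile-time checks buy: widening conversions between TNode<> wrappers are implicit, narrowing ones do not compile, and a UnionT accepts either of its members.

#include <type_traits>

struct Object {};
struct HeapObject : Object {};
struct Smi : Object {};

template <class T, class U>
struct is_subtype : std::is_base_of<U, T> {};

template <class T1, class T2>
struct UnionT {};

// T is a subtype of a union if it is a subtype of one of the members.
template <class T, class U1, class U2>
struct is_subtype<T, UnionT<U1, U2>>
    : std::integral_constant<bool, is_subtype<T, U1>::value ||
                                       is_subtype<T, U2>::value> {};

template <class T>
class TNode {
 public:
  TNode() = default;
  // Implicit conversion is only allowed in the widening direction.
  template <class U,
            typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
  TNode(const TNode<U>&) {}
};

using Number = UnionT<Smi, HeapObject>;  // stand-in for UnionT<Smi, HeapNumber>

int main() {
  TNode<Smi> smi;
  TNode<Object> obj = smi;  // ok: Smi is a subtype of Object
  TNode<Number> num = smi;  // ok: Smi is one of the union members
  // TNode<Smi> bad = obj;  // would not compile: Object is not a Smi
  (void)obj;
  (void)num;
  return 0;
}
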
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 592d85440c..33990dfa48 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -5,6 +5,7 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compilation-dependency.h"
+#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -155,7 +156,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the representation.
- FieldRepresentationDependency(const MapRef& owner, int descriptor,
+ FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor,
Representation representation)
: owner_(owner),
descriptor_(descriptor),
@@ -180,7 +181,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
Representation representation_;
};
@@ -188,7 +189,7 @@ class FieldTypeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
- FieldTypeDependency(const MapRef& owner, int descriptor,
+ FieldTypeDependency(const MapRef& owner, InternalIndex descriptor,
const ObjectRef& type)
: owner_(owner), descriptor_(descriptor), type_(type) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
@@ -210,13 +211,13 @@ class FieldTypeDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
ObjectRef type_;
};
class FieldConstnessDependency final : public CompilationDependency {
public:
- FieldConstnessDependency(const MapRef& owner, int descriptor)
+ FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor)
: owner_(owner), descriptor_(descriptor) {
DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
DCHECK_EQ(PropertyConstness::kConst,
@@ -238,7 +239,7 @@ class FieldConstnessDependency final : public CompilationDependency {
private:
MapRef owner_;
- int descriptor_;
+ InternalIndex descriptor_;
};
class GlobalPropertyDependency final : public CompilationDependency {
@@ -282,12 +283,12 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
- DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
+ DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid);
}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
- return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
+ return cell->value() == Smi::FromInt(Protectors::kProtectorValid);
}
void Install(const MaybeObjectHandle& code) const override {
@@ -404,7 +405,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode(
}
PropertyConstness CompilationDependencies::DependOnFieldConstness(
- const MapRef& map, int descriptor) {
+ const MapRef& map, InternalIndex descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
PropertyConstness constness =
owner.GetPropertyDetails(descriptor).constness();
@@ -426,13 +427,13 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
return PropertyConstness::kConst;
}
-void CompilationDependencies::DependOnFieldRepresentation(const MapRef& map,
- int descriptor) {
+void CompilationDependencies::DependOnFieldRepresentation(
+ const MapRef& map, InternalIndex descriptor) {
RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor));
}
void CompilationDependencies::DependOnFieldType(const MapRef& map,
- int descriptor) {
+ InternalIndex descriptor) {
RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor));
}
@@ -444,7 +445,7 @@ void CompilationDependencies::DependOnGlobalProperty(
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- if (cell.value().AsSmi() != Isolate::kProtectorValid) return false;
+ if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(new (zone_) ProtectorDependency(cell));
return true;
}
@@ -632,7 +633,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, int descriptor) const {
+ const MapRef& map, InternalIndex descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
PropertyDetails details = owner.GetPropertyDetails(descriptor);
DCHECK(details.representation().Equals(
@@ -642,8 +643,8 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
}
CompilationDependency const*
-CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
- int descriptor) const {
+CompilationDependencies::FieldTypeDependencyOffTheRecord(
+ const MapRef& map, InternalIndex descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
ObjectRef type = owner.GetFieldType(descriptor);
DCHECK(type.equals(map.GetFieldType(descriptor)));
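
The descriptor index threaded through these dependencies changes from a raw int to InternalIndex. As a rough illustration of why a dedicated index type helps (the real InternalIndex is defined elsewhere in V8 and has a richer API), a minimal strongly typed wrapper looks like this; the names here are invented for the sketch.

#include <cstdint>
#include <iostream>

class DescriptorIndex {
 public:
  explicit DescriptorIndex(uint32_t raw) : raw_(raw) {}
  uint32_t as_uint32() const { return raw_; }

 private:
  uint32_t raw_;
};

void DependOnFieldType(DescriptorIndex descriptor) {
  std::cout << "descriptor " << descriptor.as_uint32() << "\n";
}

int main() {
  DependOnFieldType(DescriptorIndex(2));
  // DependOnFieldType(2);  // would not compile: the index must be explicit
  return 0;
}
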
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index cb6cea0685..0b1612487e 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -55,11 +55,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Record the assumption that the field representation of a field does not
// change. The field is identified by the arguments.
- void DependOnFieldRepresentation(const MapRef& map, int descriptor);
+ void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor);
// Record the assumption that the field type of a field does not change. The
// field is identified by the arguments.
- void DependOnFieldType(const MapRef& map, int descriptor);
+ void DependOnFieldType(const MapRef& map, InternalIndex descriptor);
// Return a field's constness and, if kConst, record the assumption that it
// remains kConst. The field is identified by the arguments.
@@ -68,7 +68,8 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// kConst if the map is stable (and register stability dependency in that
// case). This is to ensure that fast elements kind transitions cannot be
// used to mutate fields without deoptimization of the dependent code.
- PropertyConstness DependOnFieldConstness(const MapRef& map, int descriptor);
+ PropertyConstness DependOnFieldConstness(const MapRef& map,
+ InternalIndex descriptor);
// Record the assumption that neither {cell}'s {CellType} changes, nor the
// {IsReadOnly()} flag of {cell}'s {PropertyDetails}.
@@ -119,9 +120,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
- const MapRef& map, int descriptor) const;
+ const MapRef& map, InternalIndex descriptor) const;
CompilationDependency const* FieldTypeDependencyOffTheRecord(
- const MapRef& map, int descriptor) const;
+ const MapRef& map, InternalIndex descriptor) const;
// Exposed only for testing purposes.
bool AreValid() const;
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index 537744652b..5c0f6b1cfa 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -67,7 +67,6 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
Node* input_node = node->InputAt(0);
IrOpcode::Value input_opcode = input_node->opcode();
if (IrOpcode::IsDecompressOpcode(input_opcode)) {
- DCHECK(IsValidDecompress(node->opcode(), input_opcode));
DCHECK_EQ(input_node->InputCount(), 1);
return Replace(input_node->InputAt(0));
} else if (IsReducibleConstantOpcode(input_opcode)) {
@@ -167,6 +166,42 @@ Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
return any_change ? Changed(node) : NoChange();
}
+Reduction DecompressionElimination::ReduceWord32Equal(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWord32Equal);
+
+ DCHECK_EQ(node->InputCount(), 2);
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ if (!IrOpcode::IsCompressOpcode(lhs->opcode()) ||
+ !IrOpcode::IsCompressOpcode(rhs->opcode())) {
+ return NoChange();
+ }
+ // Input nodes for compress operation.
+ lhs = lhs->InputAt(0);
+ rhs = rhs->InputAt(0);
+
+ bool changed = false;
+
+ if (lhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+ Node* input = lhs->InputAt(0);
+ if (IsReducibleConstantOpcode(input->opcode())) {
+ node->ReplaceInput(0, GetCompressedConstant(input));
+ changed = true;
+ }
+ }
+
+ if (rhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+ Node* input = rhs->InputAt(0);
+ if (IsReducibleConstantOpcode(input->opcode())) {
+ node->ReplaceInput(1, GetCompressedConstant(input));
+ changed = true;
+ }
+ }
+
+ return changed ? Changed(node) : NoChange();
+}
+
Reduction DecompressionElimination::ReduceWord64Equal(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kWord64Equal);
@@ -220,6 +255,8 @@ Reduction DecompressionElimination::Reduce(Node* node) {
return ReducePhi(node);
case IrOpcode::kTypedStateValues:
return ReduceTypedStateValues(node);
+ case IrOpcode::kWord32Equal:
+ return ReduceWord32Equal(node);
case IrOpcode::kWord64Equal:
return ReduceWord64Equal(node);
default:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index 85a6c98aa0..6b2be009c6 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -65,6 +65,11 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// value of that constant.
Reduction ReduceWord64Equal(Node* node);
+ // This is a workaround for the load elimination tests.
+ // Replaces Compress -> BitcastWordToTaggedSigned -> ReducibleConstant
+ // with CompressedConstant on both inputs of the Word32Equal operation.
+ Reduction ReduceWord32Equal(Node* node);
+
Graph* graph() const { return graph_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 8dfe356c34..ceff453164 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -187,8 +187,11 @@ class EffectControlLinearizer {
Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
+ Node* LowerLoadMessage(Node* node);
Node* LowerLoadTypedElement(Node* node);
Node* LowerLoadDataViewElement(Node* node);
+ Node* LowerLoadStackArgument(Node* node);
+ void LowerStoreMessage(Node* node);
void LowerStoreTypedElement(Node* node);
void LowerStoreDataViewElement(Node* node);
void LowerStoreSignedSmallElement(Node* node);
@@ -227,6 +230,8 @@ class EffectControlLinearizer {
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+ Node* BuildTypedArrayDataPointer(Node* base, Node* external);
+
Node* ChangeInt32ToCompressedSmi(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
@@ -247,6 +252,7 @@ class EffectControlLinearizer {
Node* SmiShiftBitsConstant();
void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
ElementsKind to);
+ void ConnectUnreachableToEnd(Node* effect, Node* control);
Factory* factory() const { return isolate()->factory(); }
Isolate* isolate() const { return jsgraph()->isolate(); }
@@ -308,19 +314,8 @@ struct PendingEffectPhi {
: effect_phi(effect_phi), block(block) {}
};
-void ConnectUnreachableToEnd(Node* effect, Node* control, JSGraph* jsgraph) {
- Graph* graph = jsgraph->graph();
- CommonOperatorBuilder* common = jsgraph->common();
- if (effect->opcode() == IrOpcode::kDead) return;
- if (effect->opcode() != IrOpcode::kUnreachable) {
- effect = graph->NewNode(common->Unreachable(), effect, control);
- }
- Node* throw_node = graph->NewNode(common->Throw(), effect, control);
- NodeProperties::MergeControlToEnd(graph, common, throw_node);
-}
-
void UpdateEffectPhi(Node* node, BasicBlock* block,
- BlockEffectControlMap* block_effects, JSGraph* jsgraph) {
+ BlockEffectControlMap* block_effects) {
// Update all inputs to an effect phi with the effects from the given
// block->effect map.
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
@@ -607,7 +602,7 @@ void EffectControlLinearizer::Run() {
// record the effect phi for later processing.
pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block));
} else {
- UpdateEffectPhi(effect_phi, block, &block_effects, jsgraph());
+ UpdateEffectPhi(effect_phi, block, &block_effects);
}
}
@@ -649,7 +644,7 @@ void EffectControlLinearizer::Run() {
if (control->opcode() == IrOpcode::kLoop) {
pending_effect_phis.push_back(PendingEffectPhi(effect, block));
} else {
- UpdateEffectPhi(effect, block, &block_effects, jsgraph());
+ UpdateEffectPhi(effect, block, &block_effects);
}
} else if (control->opcode() == IrOpcode::kIfException) {
// The IfException is connected into the effect chain, so we need
@@ -734,7 +729,7 @@ void EffectControlLinearizer::Run() {
// during the first pass (because they could have incoming back edges).
for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
- &block_effects, jsgraph());
+ &block_effects);
}
}
@@ -828,7 +823,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
// Break the effect chain on {Unreachable} and reconnect to the graph end.
// Mark the following code for deletion by connecting to the {Dead} node.
if (node->opcode() == IrOpcode::kUnreachable) {
- ConnectUnreachableToEnd(*effect, *control, jsgraph());
+ ConnectUnreachableToEnd(*effect, *control);
*effect = *control = jsgraph()->Dead();
}
}
@@ -1243,6 +1238,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTransitionElementsKind:
LowerTransitionElementsKind(node);
break;
+ case IrOpcode::kLoadMessage:
+ result = LowerLoadMessage(node);
+ break;
+ case IrOpcode::kStoreMessage:
+ LowerStoreMessage(node);
+ break;
case IrOpcode::kLoadFieldByIndex:
result = LowerLoadFieldByIndex(node);
break;
@@ -1252,6 +1253,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kLoadDataViewElement:
result = LowerLoadDataViewElement(node);
break;
+ case IrOpcode::kLoadStackArgument:
+ result = LowerLoadStackArgument(node);
+ break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
@@ -1325,6 +1329,13 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
return true;
}
+void EffectControlLinearizer::ConnectUnreachableToEnd(Node* effect,
+ Node* control) {
+ DCHECK_EQ(effect->opcode(), IrOpcode::kUnreachable);
+ Node* throw_node = graph()->NewNode(common()->Throw(), effect, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+}
+
#define __ gasm()->
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
@@ -1601,7 +1612,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
__ Bind(&if_smi);
{
// If {value} is a Smi, then we only need to check that it's not zero.
- __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)),
+ __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)),
__ Int32Constant(0)));
}
@@ -1952,7 +1963,7 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
// Rule out all primitives except oddballs (true, false, undefined, null).
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
value_instance_type);
@@ -2028,9 +2039,8 @@ Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
return value;
}
@@ -2112,8 +2122,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable as Word32.
- Node* check_lhs_minint = graph()->NewNode(machine()->Word32Equal(), lhs,
- __ Int32Constant(kMinInt));
+ Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt));
__ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint);
__ Bind(&if_lhs_minint);
@@ -2760,7 +2769,7 @@ Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
DCHECK(machine()->Is64());
Node* value = node->InputAt(0);
- Node* map = jsgraph()->HeapConstant(factory()->bigint_map());
+ Node* map = __ HeapConstant(factory()->bigint_map());
// BigInts with value 0 must be of size 0 (canonical form).
auto if_zerodigits = __ MakeLabel();
auto if_onedigit = __ MakeLabel();
@@ -2963,10 +2972,11 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- STATIC_ASSERT(JS_TYPED_ARRAY_TYPE + 1 == JS_DATA_VIEW_TYPE);
Node* vfalse = __ Uint32LessThan(
- __ Int32Sub(value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)),
- __ Int32Constant(2));
+ __ Int32Sub(value_instance_type,
+ __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)),
+ __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
+ FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -3521,7 +3531,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
Node* parent_frame_type = __ Load(
- MachineType::TypeCompressedTagged(), parent_frame,
+ MachineType::IntPtr(), parent_frame,
__ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
__ GotoIf(__ IntPtrEqual(parent_frame_type,
@@ -3541,7 +3551,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
__ GotoIf(zero_length, &done,
- jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+ __ HeapConstant(factory()->empty_fixed_array()));
// Compute the effective size of the backing store.
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)),
@@ -3589,7 +3599,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
__ GotoIf(zero_length, &done,
- jsgraph()->HeapConstant(factory()->empty_fixed_array()));
+ __ HeapConstant(factory()->empty_fixed_array()));
// Compute the effective size of the backing store.
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)),
@@ -3671,10 +3681,9 @@ Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
__ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
&if_twobyte, &if_onebyte);
__ Bind(&if_onebyte);
- __ Goto(&done,
- jsgraph()->HeapConstant(factory()->cons_one_byte_string_map()));
+ __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map()));
__ Bind(&if_twobyte);
- __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map()));
+ __ Goto(&done, __ HeapConstant(factory()->cons_string_map()));
__ Bind(&done);
Node* result_map = done.PhiAt(0);
@@ -4287,9 +4296,8 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kFoldable | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
- rhs, __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
// Check for exception sentinel: Smi is returned to signal BigIntTooBig.
__ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
@@ -4305,9 +4313,8 @@ Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kFoldable | Operator::kNoThrow);
- Node* value =
- __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
- node->InputAt(0), __ NoContextConstant());
+ Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ node->InputAt(0), __ NoContextConstant());
return value;
}
@@ -4746,6 +4753,20 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
__ Bind(&done);
}
+Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object_pattern =
+ __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
+ return __ BitcastWordToTagged(object_pattern);
+}
+
+void EffectControlLinearizer::LowerStoreMessage(Node* node) {
+ Node* offset = node->InputAt(0);
+ Node* object = node->InputAt(1);
+ Node* object_pattern = __ BitcastTaggedToWord(object);
+ __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
+}
+
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -4801,6 +4822,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
// architectures, or a mutable HeapNumber.
__ Bind(&if_double);
{
+ auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
index = __ WordSar(index, one);
@@ -4818,10 +4840,9 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* result = __ Load(MachineType::Float64(), object, offset);
__ Goto(&done_double, result);
} else {
- Node* result =
+ Node* field =
__ Load(MachineType::TypeCompressedTagged(), object, offset);
- result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
- __ Goto(&done_double, result);
+ __ Goto(&loaded_field, field);
}
}
@@ -4834,10 +4855,24 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
- Node* result =
+ Node* field =
__ Load(MachineType::TypeCompressedTagged(), properties, offset);
- result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
- __ Goto(&done_double, result);
+ __ Goto(&loaded_field, field);
+ }
+
+ __ Bind(&loaded_field);
+ {
+ Node* field = loaded_field.PhiAt(0);
+ // We may have transitioned in-place away from double, so check that
+ // this is a HeapNumber -- otherwise the load is fine and we don't need
+ // to copy anything anyway.
+ __ GotoIf(ObjectIsSmi(field), &done, field);
+ Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
+ __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
+ field);
+
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
+ __ Goto(&done_double, value);
}
__ Bind(&done_double);
@@ -4988,6 +5023,35 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
done.PhiAt(0));
}
+// Compute the data pointer, handling the case where the {external} pointer
+// is the effective data pointer (i.e. the {base} is Smi zero).
+Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
+ Node* external) {
+ if (IntPtrMatcher(base).Is(0)) {
+ return external;
+ } else {
+ if (COMPRESS_POINTERS_BOOL) {
+ // TurboFan does not support loading of compressed fields without
+ // decompression so we add the following operations to workaround that.
+ // We can't load the base value as word32 because in that case the
+ // value will not be marked as tagged in the pointer map and will not
+ // survive GC.
+ // Compress base value back to in order to be able to decompress by
+ // doing an unsafe add below. Both decompression and compression
+ // will be removed by the decompression elimination pass.
+ base = __ ChangeTaggedToCompressed(base);
+ base = __ BitcastTaggedToWord(base);
+ // Zero-extend Tagged_t to UintPtr according to current compression
+ // scheme so that the addition with |external_pointer| (which already
+ // contains the compensated offset value) will decompress the tagged value.
+ // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
+ // details.
+ base = ChangeUint32ToUintPtr(base);
+ }
+ return __ UnsafePointerAdd(base, external);
+ }
+}
+
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
@@ -4999,17 +5063,22 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- // Compute the effective storage pointer, handling the case where the
- // {external} pointer is the effective storage pointer (i.e. the {base}
- // is Smi zero).
- Node* storage = IntPtrMatcher(base).Is(0)
- ? external
- : __ UnsafePointerAdd(base, external);
+ Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
return __ LoadElement(AccessBuilder::ForTypedArrayElement(
array_type, true, LoadSensitivity::kCritical),
- storage, index);
+ data_ptr, index);
+}
+
+Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Node* argument =
+ __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
+
+ return __ BitcastWordToTagged(argument);
}
void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
@@ -5024,16 +5093,11 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- // Compute the effective storage pointer, handling the case where the
- // {external} pointer is the effective storage pointer (i.e. the {base}
- // is Smi zero).
- Node* storage = IntPtrMatcher(base).Is(0)
- ? external
- : __ UnsafePointerAdd(base, external);
+ Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
__ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
- storage, index, value);
+ data_ptr, index, value);
}
void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
@@ -5402,7 +5466,7 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
__ Call(call_descriptor, __ CEntryStubConstant(1),
- jsgraph()->SmiConstant(static_cast<int>(reason)),
+ __ SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference::Create(id)),
__ Int32Constant(1), __ NoContextConstant());
}
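
The recurring change in this file is BuildTypedArrayDataPointer, which factors out the typed-array data pointer computation used by both LowerLoadTypedElement and LowerStoreTypedElement. Ignoring the pointer-compression workaround (which only exists so the tagged base stays visible to the GC in the pointer map), the arithmetic reduces to the following sketch; the function name and the plain integer types are stand-ins for the graph nodes.

#include <cstdint>
#include <cstdio>

// base is zero for off-heap typed arrays, in which case external already is
// the data pointer; otherwise external holds a compensated offset that is
// added to the tagged base with an unsafe (GC-invisible) add.
uintptr_t TypedArrayDataPointer(uintptr_t base, uintptr_t external) {
  if (base == 0) return external;
  return base + external;
}

int main() {
  std::printf("0x%llx\n", static_cast<unsigned long long>(
                              TypedArrayDataPointer(0x1000, 0x10)));
  return 0;
}
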
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 18ae069b21..b2fb8d10ce 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -326,9 +326,8 @@ void EscapeAnalysisReducer::Finalize() {
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
- NodeProperties::ChangeOp(load,
- jsgraph()->simplified()->LoadElement(
- AccessBuilder::ForStackArgument()));
+ NodeProperties::ChangeOp(
+ load, jsgraph()->simplified()->LoadStackArgument());
break;
}
case IrOpcode::kLoadField: {
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 9478c08c6c..576f6ce542 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -137,13 +137,17 @@ Node* CreateStubBuiltinContinuationFrameState(
// Stack parameters first. Depending on {mode}, final parameters are added
// by the deoptimizer and aren't explicitly passed in the frame state.
int stack_parameter_count =
- descriptor.GetParameterCount() - DeoptimizerParameterCountFor(mode);
- // Reserving space in the vector, except for the case where
- // stack_parameter_count is -1.
- actual_parameters.reserve(stack_parameter_count >= 0
- ? stack_parameter_count +
- descriptor.GetRegisterParameterCount()
- : 0);
+ descriptor.GetStackParameterCount() - DeoptimizerParameterCountFor(mode);
+
+ // Ensure the parameters added by the deoptimizer are passed on the stack.
+ // This check prevents using TFS builtins as continuations while doing the
+ // lazy deopt. Use a TFC or TFJ builtin as a lazy deopt continuation,
+ // which would pass the result parameter on the stack.
+ DCHECK_GE(stack_parameter_count, 0);
+
+ // Reserving space in the vector.
+ actual_parameters.reserve(stack_parameter_count +
+ descriptor.GetRegisterParameterCount());
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(
parameters[descriptor.GetRegisterParameterCount() + i]);
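
The reworked count above switches from GetParameterCount() to GetStackParameterCount() and asserts the result is non-negative. A toy computation with made-up numbers (the variable names stand in for the descriptor calls and are not the real API):

#include <cassert>

int main() {
  int stack_params = 3;     // descriptor.GetStackParameterCount()
  int register_params = 2;  // descriptor.GetRegisterParameterCount()
  int deopt_params = 1;     // DeoptimizerParameterCountFor(mode)

  int stack_parameter_count = stack_params - deopt_params;  // 2
  assert(stack_parameter_count >= 0);  // the new DCHECK_GE in the hunk
  int reserved = stack_parameter_count + register_params;   // 4 vector slots
  return reserved == 4 ? 0 : 1;
}
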
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
index 2345f1d360..6af63030f8 100644
--- a/deps/v8/src/compiler/functional-list.h
+++ b/deps/v8/src/compiler/functional-list.h
@@ -90,6 +90,8 @@ class FunctionalList {
size_t Size() const { return elements_ ? elements_->size : 0; }
+ void Clear() { elements_ = nullptr; }
+
class iterator {
public:
explicit iterator(Cons* cur) : current_(cur) {}
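
FunctionalList is a persistent (structurally shared) list, so the new Clear() only drops this list's head pointer; other lists sharing the same tail are untouched. A standalone sketch of that behavior, using shared_ptr where V8's version uses zone allocation:

#include <cstddef>
#include <iostream>
#include <memory>

template <class A>
class FunctionalList {
  struct Cons {
    A top;
    std::shared_ptr<Cons> rest;
    size_t size;
  };

 public:
  void PushFront(A a) {
    elements_ = std::make_shared<Cons>(Cons{a, elements_, Size() + 1});
  }
  size_t Size() const { return elements_ ? elements_->size : 0; }
  void Clear() { elements_ = nullptr; }

 private:
  std::shared_ptr<Cons> elements_;
};

int main() {
  FunctionalList<int> a;
  a.PushFront(1);
  a.PushFront(2);
  FunctionalList<int> b = a;  // shares the tail structurally
  a.Clear();
  std::cout << a.Size() << " " << b.Size() << "\n";  // prints: 0 2
  return 0;
}
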
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index b4ad81ecda..5c167db980 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -99,6 +99,10 @@ Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) {
}
Node* GraphAssembler::TaggedEqual(Node* left, Node* right) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(ChangeTaggedToCompressed(left),
+ ChangeTaggedToCompressed(right));
+ }
return WordEqual(left, right);
}
@@ -232,10 +236,10 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
current_effect_, current_control_);
}
-Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) {
+Node* GraphAssembler::BitcastTaggedToWordForTagAndSmiBits(Node* value) {
return current_effect_ =
- graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value,
- current_effect_, current_control_);
+ graph()->NewNode(machine()->BitcastTaggedToWordForTagAndSmiBits(),
+ value, current_effect_, current_control_);
}
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
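
The TaggedEqual change above makes equality of tagged values a 32-bit comparison of their compressed halves when pointer compression is enabled. A simplified standalone model, where compression is just truncation to 32 bits (a simplification of ChangeTaggedToCompressed) and the flag replaces the COMPRESS_POINTERS_BOOL build constant:

#include <cstdint>
#include <iostream>

static uint32_t Compress(uint64_t tagged) {
  return static_cast<uint32_t>(tagged);  // keep only the lower 32 bits
}

bool TaggedEqual(uint64_t left, uint64_t right, bool compress_pointers) {
  if (compress_pointers) return Compress(left) == Compress(right);
  return left == right;  // full-width word comparison otherwise
}

int main() {
  // With compression, values agreeing in the low 32 bits compare equal.
  std::cout << TaggedEqual(0x100000042, 0x200000042, true) << "\n";   // 1
  std::cout << TaggedEqual(0x100000042, 0x200000042, false) << "\n";  // 0
  return 0;
}
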
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 0088f867c5..d2df5a75f3 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -233,7 +233,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
Node* BitcastTaggedToWord(Node* value);
- Node* BitcastTaggedSignedToWord(Node* value);
+ Node* BitcastTaggedToWordForTagAndSmiBits(Node* value);
Node* Allocate(AllocationType allocation, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 85123261db..dddba7d36f 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -163,7 +163,6 @@ void JsonPrintInlinedFunctionInfo(
void JsonPrintAllSourceWithPositions(std::ostream& os,
OptimizedCompilationInfo* info,
Isolate* isolate) {
- AllowDeferredHandleDereference allow_deference_for_print_code;
os << "\"sources\" : {";
Handle<Script> script =
(info->shared_info().is_null() ||
@@ -1055,15 +1054,9 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
}
break;
}
- case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
const LocationOperand* allocated = LocationOperand::cast(op);
- os << "\"type\": ";
- if (allocated->IsExplicit()) {
- os << "\"explicit\", ";
- } else {
- os << "\"allocated\", ";
- }
+ os << "\"type\": \"allocated\", ";
os << "\"text\": \"";
if (op->IsStackSlot()) {
os << "stack:" << allocated->index();
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 9b1aa53eb9..c6322ebe69 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -29,7 +29,6 @@ class NativeContext;
class ScriptContextTable;
namespace compiler {
-
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
@@ -95,10 +94,12 @@ enum class OddballType : uint8_t {
V(PropertyCell) \
V(SharedFunctionInfo) \
V(SourceTextModule) \
+ V(TemplateObjectDescription) \
/* Subtypes of Object */ \
V(HeapObject)
class CompilationDependencies;
+struct FeedbackSource;
class JSHeapBroker;
class ObjectData;
class PerIsolateCompilerCache;
@@ -163,8 +164,8 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
- friend class JSGlobalProxyRef;
- friend class JSGlobalProxyData;
+ friend class JSGlobalObjectData;
+ friend class JSGlobalObjectRef;
friend class JSHeapBroker;
friend class JSObjectData;
friend class StringData;
@@ -329,8 +330,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
SharedFunctionInfoRef shared() const;
FeedbackVectorRef feedback_vector() const;
int InitialMapInstanceSizeWithMinSlack() const;
-
- bool IsSerializedForCompilation() const;
};
class JSRegExpRef : public JSObjectRef {
@@ -344,6 +343,8 @@ class JSRegExpRef : public JSObjectRef {
ObjectRef source() const;
ObjectRef flags() const;
ObjectRef last_index() const;
+
+ void SerializeAsRegExpBoilerplate();
};
class HeapNumberRef : public HeapObjectRef {
@@ -496,7 +497,6 @@ class FeedbackVectorRef : public HeapObjectRef {
double invocation_count() const;
void Serialize();
- ObjectRef get(FeedbackSlot slot) const;
FeedbackCellRef GetClosureFeedbackCell(int index) const;
};
@@ -535,6 +535,9 @@ class AllocationSiteRef : public HeapObjectRef {
//
// If PointsToLiteral() is false, then IsFastLiteral() is also false.
bool IsFastLiteral() const;
+
+ void SerializeBoilerplate();
+
// We only serialize boilerplate if IsFastLiteral is true.
base::Optional<JSObjectRef> boilerplate() const;
@@ -585,7 +588,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
bool is_migration_target() const;
bool supports_fast_array_iteration() const;
bool supports_fast_array_resize() const;
- bool IsMapOfTargetGlobalProxy() const;
bool is_abandoned_prototype_map() const;
OddballType oddball_type() const;
@@ -609,15 +611,15 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
// Concerning the underlying instance_descriptors:
void SerializeOwnDescriptors();
- void SerializeOwnDescriptor(int descriptor_index);
- bool serialized_own_descriptor(int descriptor_index) const;
- MapRef FindFieldOwner(int descriptor_index) const;
- PropertyDetails GetPropertyDetails(int descriptor_index) const;
- NameRef GetPropertyKey(int descriptor_index) const;
- FieldIndex GetFieldIndexFor(int descriptor_index) const;
- ObjectRef GetFieldType(int descriptor_index) const;
- bool IsUnboxedDoubleField(int descriptor_index) const;
- ObjectRef GetStrongValue(int descriptor_number) const;
+ void SerializeOwnDescriptor(InternalIndex descriptor_index);
+ bool serialized_own_descriptor(InternalIndex descriptor_index) const;
+ MapRef FindFieldOwner(InternalIndex descriptor_index) const;
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
+ ObjectRef GetFieldType(InternalIndex descriptor_index) const;
+ bool IsUnboxedDoubleField(InternalIndex descriptor_index) const;
+ ObjectRef GetStrongValue(InternalIndex descriptor_number) const;
void SerializeRootMap();
base::Optional<MapRef> FindRootMap() const;
@@ -727,7 +729,6 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
Address handler_table_address() const;
int handler_table_size() const;
- bool IsSerializedForCompilation() const;
void SerializeForCompilation();
};
@@ -769,7 +770,8 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, is_safe_to_skip_arguments_adaptor) \
V(bool, IsInlineable) \
V(int, StartPosition) \
- V(bool, is_compiled)
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -791,7 +793,7 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
// wraps the retrieval of the template object and creates it if
// necessary.
JSArrayRef GetTemplateObject(
- ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+ TemplateObjectDescriptionRef description, FeedbackSource const& source,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
void SerializeFunctionTemplateInfo();
@@ -826,7 +828,7 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
- void* external_pointer() const;
+ void* data_ptr() const;
void Serialize();
bool serialized() const;
@@ -845,6 +847,13 @@ class SourceTextModuleRef : public HeapObjectRef {
base::Optional<CellRef> GetCell(int cell_index) const;
};
+class TemplateObjectDescriptionRef : public HeapObjectRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(TemplateObjectDescription, HeapObjectRef)
+
+ Handle<TemplateObjectDescription> object() const;
+};
+
class CellRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef)
@@ -859,13 +868,8 @@ class JSGlobalObjectRef : public JSObjectRef {
DEFINE_REF_CONSTRUCTOR(JSGlobalObject, JSObjectRef)
Handle<JSGlobalObject> object() const;
-};
-
-class JSGlobalProxyRef : public JSObjectRef {
- public:
- DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef)
- Handle<JSGlobalProxy> object() const;
+ bool IsDetached() const;
// If {serialize} is false:
// If the property is known to exist as a property cell (on the global
@@ -879,6 +883,13 @@ class JSGlobalProxyRef : public JSObjectRef {
SerializationPolicy::kAssumeSerialized) const;
};
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef)
+
+ Handle<JSGlobalProxy> object() const;
+};
+
class CodeRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
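The MapRef hunks above switch every descriptor accessor from a raw int index to InternalIndex, and later hunks iterate descriptors with InternalIndex::Range() and convert back via as_int(). As a rough illustration of the shape of that API (a simplified stand-in written for this note, not V8's actual class), a typed index wrapper could look like this:

#include <cstddef>
#include <iostream>

// Hypothetical, simplified stand-in for the InternalIndex type adopted above.
// It only mirrors the members this diff relies on (as_int(), is_found(), and
// Range() for range-based iteration over descriptor slots).
class InternalIndex {
 public:
  explicit InternalIndex(size_t raw) : raw_(raw) {}
  static InternalIndex NotFound() { return InternalIndex(kNotFound); }

  int as_int() const { return static_cast<int>(raw_); }
  bool is_found() const { return raw_ != kNotFound; }

  // Iterator protocol so "for (InternalIndex i : InternalIndex::Range(n))" works.
  bool operator!=(const InternalIndex& other) const { return raw_ != other.raw_; }
  InternalIndex& operator++() { ++raw_; return *this; }
  InternalIndex operator*() const { return *this; }

  class Range {
   public:
    explicit Range(size_t size) : size_(size) {}
    InternalIndex begin() const { return InternalIndex(0); }
    InternalIndex end() const { return InternalIndex(size_); }
   private:
    size_t size_;
  };

 private:
  static constexpr size_t kNotFound = static_cast<size_t>(-1);
  size_t raw_;
};

int main() {
  // Walk three descriptor slots the way SerializeOwnDescriptors now does.
  for (InternalIndex i : InternalIndex::Range(3)) {
    std::cout << "descriptor " << i.as_int() << "\n";
  }
  return 0;
}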
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 1e2a36089b..0190d3a9c4 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_INT64_LOWERING_H_
#define V8_COMPILER_INT64_LOWERING_H_
+#include <memory>
+
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 0b7b4a65f4..d400fa2673 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -473,10 +473,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
return inference.NoChange();
}
- if (!receiver_map.serialized_own_descriptor(
- JSFunction::kLengthDescriptorIndex) ||
- !receiver_map.serialized_own_descriptor(
- JSFunction::kNameDescriptorIndex)) {
+ const InternalIndex kLengthIndex(JSFunction::kLengthDescriptorIndex);
+ const InternalIndex kNameIndex(JSFunction::kNameDescriptorIndex);
+ if (!receiver_map.serialized_own_descriptor(kLengthIndex) ||
+ !receiver_map.serialized_own_descriptor(kNameIndex)) {
TRACE_BROKER_MISSING(broker(),
"serialized descriptors on map " << receiver_map);
return inference.NoChange();
@@ -485,14 +485,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
StringRef length_string(broker(), roots.length_string_handle());
StringRef name_string(broker(), roots.name_string_handle());
- if (!receiver_map.GetPropertyKey(JSFunction::kLengthDescriptorIndex)
- .equals(length_string) ||
- !receiver_map.GetStrongValue(JSFunction::kLengthDescriptorIndex)
- .IsAccessorInfo() ||
- !receiver_map.GetPropertyKey(JSFunction::kNameDescriptorIndex)
- .equals(name_string) ||
- !receiver_map.GetStrongValue(JSFunction::kNameDescriptorIndex)
- .IsAccessorInfo()) {
+ if (!receiver_map.GetPropertyKey(kLengthIndex).equals(length_string) ||
+ !receiver_map.GetStrongValue(kLengthIndex).IsAccessorInfo() ||
+ !receiver_map.GetPropertyKey(kNameIndex).equals(name_string) ||
+ !receiver_map.GetStrongValue(kNameIndex).IsAccessorInfo()) {
return inference.NoChange();
}
}
@@ -3013,12 +3009,13 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
node->opcode() == IrOpcode::kJSConstructWithSpread);
- // Check if {arguments_list} is an arguments object, and {node} is the only
- // value user of {arguments_list} (except for value uses in frame states).
Node* arguments_list = NodeProperties::GetValueInput(node, arity);
if (arguments_list->opcode() != IrOpcode::kJSCreateArguments) {
return NoChange();
}
+
+ // Check if {node} is the only value user of {arguments_list} (except for
+ // value uses in frame states). If not, we give up for now.
for (Edge edge : arguments_list->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
Node* const user = edge.from();
@@ -3704,7 +3701,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kMapIteratorPrototypeNext:
return ReduceCollectionIteratorPrototypeNext(
node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
- FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
+ FIRST_JS_MAP_ITERATOR_TYPE, LAST_JS_MAP_ITERATOR_TYPE);
case Builtins::kSetPrototypeEntries:
return ReduceCollectionIteration(node, CollectionKind::kSet,
IterationKind::kEntries);
@@ -3716,7 +3713,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kSetIteratorPrototypeNext:
return ReduceCollectionIteratorPrototypeNext(
node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
- FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
+ FIRST_JS_SET_ITERATOR_TYPE, LAST_JS_SET_ITERATOR_TYPE);
case Builtins::kDatePrototypeGetTime:
return ReduceDatePrototypeGetTime(node);
case Builtins::kDateNow:
@@ -5676,8 +5673,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!FLAG_experimental_inline_promise_constructor) return NoChange();
-
// Only handle builtins Promises, not subclasses.
if (target != new_target) return NoChange();
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 035e8b7ceb..409fc6c9a1 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -38,7 +38,7 @@ Reduction JSContextSpecialization::ReduceParameter(Node* node) {
// Constant-fold the function parameter {node}.
Handle<JSFunction> function;
if (closure().ToHandle(&function)) {
- Node* value = jsgraph()->HeapConstant(function);
+ Node* value = jsgraph()->Constant(JSFunctionRef(broker_, function));
return Replace(value);
}
}
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index cb52ccaccb..6ab54d793a 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -18,6 +18,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
+#include "src/execution/protectors.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number.h"
@@ -26,6 +27,7 @@
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -84,6 +86,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateLiteralArrayOrObject(node);
case IrOpcode::kJSCreateLiteralRegExp:
return ReduceJSCreateLiteralRegExp(node);
+ case IrOpcode::kJSGetTemplateObject:
+ return ReduceJSGetTemplateObject(node);
case IrOpcode::kJSCreateEmptyLiteralArray:
return ReduceJSCreateEmptyLiteralArray(node);
case IrOpcode::kJSCreateEmptyLiteralObject:
@@ -640,10 +644,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
allocation = dependencies()->DependOnPretenureMode(*site_ref);
dependencies()->DependOnElementsKind(*site_ref);
} else {
- CellRef array_constructor_protector(
+ PropertyCellRef array_constructor_protector(
broker(), factory()->array_constructor_protector());
- can_inline_call =
- array_constructor_protector.value().AsSmi() == Isolate::kProtectorValid;
+ can_inline_call = array_constructor_protector.value().AsSmi() ==
+ Protectors::kProtectorValid;
}
if (arity == 0) {
@@ -1073,15 +1077,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
- ObjectRef feedback = feedback_vector.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsAllocationSite()) {
- AllocationSiteRef site = feedback.AsAllocationSite();
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ AllocationSiteRef site = feedback.AsLiteral().value();
if (site.IsFastLiteral()) {
AllocationType allocation = AllocationType::kYoung;
if (FLAG_allocation_site_pretenuring) {
@@ -1095,20 +1094,17 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
return Replace(value);
}
}
+
return NoChange();
}
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef fv(broker(), p.feedback().vector);
- ObjectRef feedback = fv.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsAllocationSite()) {
- AllocationSiteRef site = feedback.AsAllocationSite();
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ AllocationSiteRef site = feedback.AsLiteral().value();
DCHECK(!site.PointsToLiteral());
MapRef initial_map =
native_context().GetInitialJSArrayMap(site.GetElementsKind());
@@ -1162,22 +1158,30 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- FeedbackVectorRef feedback_vector(broker(), p.feedback().vector);
- ObjectRef feedback = feedback_vector.get(p.feedback().slot);
- // TODO(turbofan): we should consider creating a ProcessedFeedback for
- // allocation sites/boiler plates so that we use GetFeedback here. Then
- // we can eventually get rid of the additional copy of feedback slots that
- // we currently have in FeedbackVectorData.
- if (feedback.IsJSRegExp()) {
- JSRegExpRef boilerplate = feedback.AsJSRegExp();
- Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate);
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForRegExpLiteral(p.feedback());
+ if (!feedback.IsInsufficient()) {
+ JSRegExpRef literal = feedback.AsRegExpLiteral().value();
+ Node* value = effect = AllocateLiteralRegExp(effect, control, literal);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
return NoChange();
}
+Reduction JSCreateLowering::ReduceJSGetTemplateObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGetTemplateObject, node->opcode());
+ GetTemplateObjectParameters const& parameters =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), parameters.shared());
+ JSArrayRef template_object = shared.GetTemplateObject(
+ TemplateObjectDescriptionRef(broker(), parameters.description()),
+ parameters.feedback());
+ Node* value = jsgraph()->Constant(template_object);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
@@ -1628,7 +1632,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
inobject_fields.reserve(boilerplate_map.GetInObjectProperties());
int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
- for (int i = 0; i < boilerplate_nof; ++i) {
+ for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
if (property_details.location() != kField) continue;
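The three ReduceJSCreateLiteral* hunks above replace direct reads of the raw feedback slot (followed by IsAllocationSite()/IsJSRegExp() checks) with a query for processed feedback and an IsInsufficient() bailout. A compact sketch of that control flow, with hypothetical stand-in types rather than the real Ref classes:

#include <iostream>
#include <optional>

// Stand-ins for AllocationSiteRef and the literal ProcessedFeedback; these are
// assumptions for illustration only, not V8's real classes.
struct AllocationSiteStub {
  bool fast_literal;
};

struct LiteralFeedbackStub {
  std::optional<AllocationSiteStub> site;
  bool IsInsufficient() const { return !site.has_value(); }
};

enum class Reduction { kNoChange, kReplaced };

// Mirrors the control flow of the rewritten ReduceJSCreateLiteralArrayOrObject:
// bail out on insufficient feedback, inline the boilerplate only for a
// fast-literal allocation site.
Reduction ReduceLiteral(const LiteralFeedbackStub& feedback) {
  if (feedback.IsInsufficient()) return Reduction::kNoChange;
  if (!feedback.site->fast_literal) return Reduction::kNoChange;
  return Reduction::kReplaced;  // the real code allocates the fast literal here
}

int main() {
  std::cout << (ReduceLiteral({AllocationSiteStub{true}}) == Reduction::kReplaced)
            << "\n";                                                             // 1
  std::cout << (ReduceLiteral({std::nullopt}) == Reduction::kNoChange) << "\n";  // 1
}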
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 44a3b213b7..2fb28ebfd4 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -67,6 +67,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
+ Reduction ReduceJSGetTemplateObject(Node* node);
Reduction ReduceNewArray(
Node* node, Node* length, MapRef initial_map, ElementsKind elements_kind,
AllocationType allocation,
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index d2a9b675f9..d419a804a5 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -236,14 +236,15 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
}
void JSGenericLowering::LowerJSGetIterator(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- const PropertyAccess& p = PropertyAccessOf(node->op());
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
- Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
- node->InsertInput(zone(), 2, vector);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetIteratorWithFeedback);
- ReplaceWithStubCall(node, callable, flags);
+ // TODO(v8:9625): Currently, the GetIterator operator is desugared in the
+ // native context specialization phase. Thus, the following generic lowering
+ // would never be reachable. We can add a check in native context
+ // specialization to avoid desugaring the GetIterator operator when in the
+ // case of megamorphic feedback and here, add a call to the
+ // 'GetIteratorWithFeedback' builtin. This would reduce the size of the
+ // compiled code as it would insert 1 call to the builtin instead of 2 calls
+ // resulting from the generic lowering of the LoadNamed and Call operators.
+ UNREACHABLE();
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
@@ -561,6 +562,10 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
}
}
+void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
+ UNREACHABLE(); // Eliminated in native context specialization.
+}
+
void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 05048f7f4b..9a725eb4e9 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -16,6 +16,7 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
+#include "src/execution/protectors-inl.h"
#include "src/init/bootstrapper.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
@@ -86,6 +87,11 @@ class ObjectData : public ZoneObject {
ObjectDataKind kind() const { return kind_; }
bool is_smi() const { return kind_ == kSmi; }
+#ifdef DEBUG
+ enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
+ mutable Usage used_status = Usage::kUnused;
+#endif // DEBUG
+
private:
Handle<Object> const object_;
ObjectDataKind const kind_;
@@ -420,7 +426,7 @@ class JSTypedArrayData : public JSObjectData {
bool is_on_heap() const { return is_on_heap_; }
size_t length() const { return length_; }
- void* external_pointer() const { return external_pointer_; }
+ void* data_ptr() const { return data_ptr_; }
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
@@ -430,7 +436,7 @@ class JSTypedArrayData : public JSObjectData {
private:
bool const is_on_heap_;
size_t const length_;
- void* const external_pointer_;
+ void* const data_ptr_;
bool serialized_ = false;
HeapObjectData* buffer_ = nullptr;
@@ -441,7 +447,7 @@ JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
: JSObjectData(broker, storage, object),
is_on_heap_(object->is_on_heap()),
length_(object->length()),
- external_pointer_(object->external_pointer()) {}
+ data_ptr_(object->DataPtr()) {}
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
@@ -833,8 +839,7 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
isolate);
- int limit = boilerplate->map().NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -962,9 +967,6 @@ class MapData : public HeapObjectData {
bool supports_fast_array_resize() const {
return supports_fast_array_resize_;
}
- bool IsMapOfTargetGlobalProxy() const {
- return is_map_of_target_global_proxy_;
- }
bool is_abandoned_prototype_map() const {
return is_abandoned_prototype_map_;
}
@@ -979,9 +981,10 @@ class MapData : public HeapObjectData {
// Serialize a single (or all) own slot(s) of the descriptor array and recurse
// on field owner(s).
- void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index);
+ void SerializeOwnDescriptor(JSHeapBroker* broker,
+ InternalIndex descriptor_index);
void SerializeOwnDescriptors(JSHeapBroker* broker);
- ObjectData* GetStrongValue(int descriptor_index) const;
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
DescriptorArrayData* instance_descriptors() const {
return instance_descriptors_;
}
@@ -1027,7 +1030,6 @@ class MapData : public HeapObjectData {
int const unused_property_fields_;
bool const supports_fast_array_iteration_;
bool const supports_fast_array_resize_;
- bool const is_map_of_target_global_proxy_;
bool const is_abandoned_prototype_map_;
bool serialized_elements_kind_generalizations_ = false;
@@ -1109,8 +1111,9 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
DescriptorArray descriptors = jsarray_map->instance_descriptors();
- int number = descriptors.Search(*length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
+ // TODO(jkummerow): We could skip the search and hardcode number == 0.
+ InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
+ DCHECK(number.is_found());
return descriptors.GetDetails(number).IsReadOnly();
}
@@ -1120,7 +1123,7 @@ bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
map->prototype().IsJSArray() &&
isolate->IsAnyInitialArrayPrototype(
handle(JSArray::cast(map->prototype()), isolate)) &&
- isolate->IsNoElementsProtectorIntact();
+ Protectors::IsNoElementsIntact(isolate);
}
bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
@@ -1154,8 +1157,6 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
SupportsFastArrayIteration(broker->isolate(), object)),
supports_fast_array_resize_(
SupportsFastArrayResize(broker->isolate(), object)),
- is_map_of_target_global_proxy_(
- object->IsMapOfGlobalProxy(broker->target_native_context().object())),
is_abandoned_prototype_map_(object->is_abandoned_prototype_map()),
elements_kind_generalizations_(broker->zone()) {}
@@ -1268,7 +1269,6 @@ class FeedbackVectorData : public HeapObjectData {
double invocation_count() const { return invocation_count_; }
void Serialize(JSHeapBroker* broker);
- const ZoneVector<ObjectData*>& feedback() { return feedback_; }
FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker,
int index) const;
@@ -1276,7 +1276,6 @@ class FeedbackVectorData : public HeapObjectData {
double const invocation_count_;
bool serialized_ = false;
- ZoneVector<ObjectData*> feedback_;
ZoneVector<ObjectData*> closure_feedback_cell_array_;
};
@@ -1285,7 +1284,6 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
Handle<FeedbackVector> object)
: HeapObjectData(broker, storage, object),
invocation_count_(object->invocation_count()),
- feedback_(broker->zone()),
closure_feedback_cell_array_(broker->zone()) {}
FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell(
@@ -1309,26 +1307,6 @@ void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
- DCHECK(feedback_.empty());
- feedback_.reserve(vector->length());
- for (int i = 0; i < vector->length(); ++i) {
- MaybeObject value = vector->get(i);
- ObjectData* slot_value =
- value->IsObject() ? broker->GetOrCreateData(value->cast<Object>())
- : nullptr;
- feedback_.push_back(slot_value);
- if (slot_value == nullptr) continue;
-
- if (slot_value->IsAllocationSite() &&
- slot_value->AsAllocationSite()->IsFastLiteral()) {
- slot_value->AsAllocationSite()->SerializeBoilerplate(broker);
- } else if (slot_value->IsJSRegExp()) {
- slot_value->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker);
- }
- }
- DCHECK_EQ(vector->length(), feedback_.size());
- TRACE(broker, "Copied " << feedback_.size() << " slots");
-
DCHECK(closure_feedback_cell_array_.empty());
int length = vector->closure_feedback_cell_array().length();
closure_feedback_cell_array_.reserve(length);
@@ -1496,10 +1474,6 @@ class BytecodeArrayData : public FixedArrayBaseData {
return *(Handle<Smi>::cast(constant_pool_[index]->object()));
}
- bool IsSerializedForCompilation() const {
- return is_serialized_for_compilation_;
- }
-
void SerializeForCompilation(JSHeapBroker* broker) {
if (is_serialized_for_compilation_) return;
@@ -1843,23 +1817,15 @@ class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object);
-};
-
-JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<JSGlobalObject> object)
- : JSObjectData(broker, storage, object) {}
-
-class JSGlobalProxyData : public JSObjectData {
- public:
- JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalProxy> object);
+ bool IsDetached() const { return is_detached_; }
PropertyCellData* GetPropertyCell(
JSHeapBroker* broker, NameData* name,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
private:
+ bool const is_detached_;
+
// Properties that either
// (1) are known to exist as property cells on the global object, or
// (2) are known not to (possibly they don't exist at all).
@@ -1867,9 +1833,22 @@ class JSGlobalProxyData : public JSObjectData {
ZoneVector<std::pair<NameData*, PropertyCellData*>> properties_;
};
+JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<JSGlobalObject> object)
+ : JSObjectData(broker, storage, object),
+ is_detached_(object->IsDetached()),
+ properties_(broker->zone()) {}
+
+class JSGlobalProxyData : public JSObjectData {
+ public:
+ JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalProxy> object);
+};
+
JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalProxy> object)
- : JSObjectData(broker, storage, object), properties_(broker->zone()) {}
+ : JSObjectData(broker, storage, object) {}
namespace {
base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
@@ -1888,7 +1867,7 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
}
} // namespace
-PropertyCellData* JSGlobalProxyData::GetPropertyCell(
+PropertyCellData* JSGlobalObjectData::GetPropertyCell(
JSHeapBroker* broker, NameData* name, SerializationPolicy policy) {
CHECK_NOT_NULL(name);
for (auto const& p : properties_) {
@@ -1911,6 +1890,13 @@ PropertyCellData* JSGlobalProxyData::GetPropertyCell(
return result;
}
+class TemplateObjectDescriptionData : public HeapObjectData {
+ public:
+ TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<TemplateObjectDescription> object)
+ : HeapObjectData(broker, storage, object) {}
+};
+
class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
@@ -2001,20 +1987,20 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
Handle<Map> map = Handle<Map>::cast(object());
int const number_of_own = map->NumberOfOwnDescriptors();
- for (int i = 0; i < number_of_own; ++i) {
+ for (InternalIndex i : InternalIndex::Range(number_of_own)) {
SerializeOwnDescriptor(broker, i);
}
}
-ObjectData* MapData::GetStrongValue(int descriptor_index) const {
- auto data = instance_descriptors_->contents().find(descriptor_index);
+ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
+ auto data = instance_descriptors_->contents().find(descriptor_index.as_int());
if (data == instance_descriptors_->contents().end()) return nullptr;
return data->second.value;
}
void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
- int descriptor_index) {
+ InternalIndex descriptor_index) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
Handle<Map> map = Handle<Map>::cast(object());
@@ -2025,8 +2011,8 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
ZoneMap<int, PropertyDescriptor>& contents =
instance_descriptors()->contents();
- CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors());
- if (contents.find(descriptor_index) != contents.end()) return;
+ CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
+ if (contents.find(descriptor_index.as_int()) != contents.end()) return;
Isolate* const isolate = broker->isolate();
auto descriptors =
@@ -2051,14 +2037,14 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
}
- contents[descriptor_index] = d;
+ contents[descriptor_index.as_int()] = d;
if (d.details.location() == kField) {
// Recurse on the owner map.
d.field_owner->SerializeOwnDescriptor(broker, descriptor_index);
}
- TRACE(broker, "Copied descriptor " << descriptor_index << " into "
+ TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
<< instance_descriptors_ << " ("
<< contents.size() << " total)");
}
@@ -2146,8 +2132,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
// Check the in-object properties.
Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
isolate);
- int const limit = boilerplate->map().NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
DCHECK_EQ(kData, details.kind());
@@ -2210,6 +2195,12 @@ void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
}
bool ObjectRef::equals(const ObjectRef& other) const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
return data_ == other.data_;
}
@@ -2269,7 +2260,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-std::ostream& JSHeapBroker::Trace() {
+std::ostream& JSHeapBroker::Trace() const {
return trace_out_ << "[" << this << "] "
<< std::string(trace_indentation_ * 2, ' ');
}
@@ -2280,10 +2271,92 @@ void JSHeapBroker::StopSerializing() {
mode_ = kSerialized;
}
+#ifdef DEBUG
+void JSHeapBroker::PrintRefsAnalysis() const {
+ // Usage counts
+ size_t used_total = 0, unused_total = 0, identity_used_total = 0;
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_total;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_total;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_total;
+ break;
+ }
+ }
+
+ // Ref types analysis
+ TRACE_BROKER_MEMORY(
+ this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
+ << "; only identity used: " << identity_used_total
+ << "; unused: " << unused_total);
+ size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
+ size_t used[LAST_TYPE + 1] = {0};
+ size_t unused[LAST_TYPE + 1] = {0};
+ size_t identity_used[LAST_TYPE + 1] = {0};
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ if (ref->value->is_smi()) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_smis;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_smis;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_smis;
+ break;
+ }
+ } else {
+ InstanceType instance_type =
+ static_cast<const HeapObjectData*>(ref->value)
+ ->map()
+ ->instance_type();
+ CHECK_LE(FIRST_TYPE, instance_type);
+ CHECK_LE(instance_type, LAST_TYPE);
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused[instance_type];
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used[instance_type];
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used[instance_type];
+ break;
+ }
+ }
+ }
+
+ TRACE_BROKER_MEMORY(
+ this, "Smis: " << used_smis + identity_used_smis + unused_smis
+ << "; data used: " << used_smis << "; only identity used: "
+ << identity_used_smis << "; unused: " << unused_smis);
+ for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
+ size_t total = used[i] + identity_used[i] + unused[i];
+ if (total == 0) continue;
+ TRACE_BROKER_MEMORY(
+ this, InstanceType(i) << ": " << total << "; data used: " << used[i]
+ << "; only identity used: " << identity_used[i]
+ << "; unused: " << unused[i]);
+ }
+}
+#endif // DEBUG
+
void JSHeapBroker::Retire() {
CHECK_EQ(mode_, kSerialized);
TRACE(this, "Retiring");
mode_ = kRetired;
+
+#ifdef DEBUG
+ PrintRefsAnalysis();
+#endif // DEBUG
}
bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; }
@@ -2473,6 +2546,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->empty_fixed_array());
GetOrCreateData(f->empty_string());
GetOrCreateData(f->eval_context_map());
+ GetOrCreateData(f->exec_string());
GetOrCreateData(f->false_string());
GetOrCreateData(f->false_value());
GetOrCreateData(f->fixed_array_map());
@@ -2480,11 +2554,13 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->fixed_double_array_map());
GetOrCreateData(f->function_context_map());
GetOrCreateData(f->function_string());
+ GetOrCreateData(f->has_instance_symbol());
GetOrCreateData(f->heap_number_map());
GetOrCreateData(f->length_string());
GetOrCreateData(f->many_closures_cell_map());
GetOrCreateData(f->minus_zero_value());
GetOrCreateData(f->name_dictionary_map());
+ GetOrCreateData(f->name_string());
GetOrCreateData(f->NaN_string());
GetOrCreateData(f->null_map());
GetOrCreateData(f->null_string());
@@ -2495,6 +2571,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->optimized_out());
GetOrCreateData(f->optimized_out_map());
GetOrCreateData(f->property_array_map());
+ GetOrCreateData(f->prototype_string());
GetOrCreateData(f->ReflectHas_string());
GetOrCreateData(f->ReflectGet_string());
GetOrCreateData(f->sloppy_arguments_elements_map());
@@ -2505,6 +2582,7 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->termination_exception_map());
GetOrCreateData(f->the_hole_map());
GetOrCreateData(f->the_hole_value());
+ GetOrCreateData(f->then_string());
GetOrCreateData(f->true_string());
GetOrCreateData(f->true_value());
GetOrCreateData(f->undefined_map());
@@ -2517,7 +2595,9 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->array_buffer_detaching_protector())
->AsPropertyCell()
->Serialize(this);
- GetOrCreateData(f->array_constructor_protector())->AsCell()->Serialize(this);
+ GetOrCreateData(f->array_constructor_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
GetOrCreateData(f->array_iterator_protector())
->AsPropertyCell()
->Serialize(this);
@@ -2537,7 +2617,9 @@ void JSHeapBroker::InitializeAndStartSerializing(
GetOrCreateData(f->promise_then_protector())
->AsPropertyCell()
->Serialize(this);
- GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this);
+ GetOrCreateData(f->string_length_protector())
+ ->AsPropertyCell()
+ ->Serialize(this);
// - CEntry stub
GetOrCreateData(
CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
@@ -2719,16 +2801,6 @@ bool MapRef::supports_fast_array_resize() const {
return data()->AsMap()->supports_fast_array_resize();
}
-bool MapRef::IsMapOfTargetGlobalProxy() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation handle_allocation;
- return object()->IsMapOfGlobalProxy(
- broker()->target_native_context().object());
- }
- return data()->AsMap()->IsMapOfTargetGlobalProxy();
-}
-
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
@@ -2785,18 +2857,6 @@ OddballType MapRef::oddball_type() const {
return OddballType::kOther;
}
-ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> value(object()->Get(slot)->cast<Object>(),
- broker()->isolate());
- return ObjectRef(broker(), value);
- }
- int i = FeedbackVector::GetIndex(slot);
- return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i));
-}
-
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
@@ -2854,6 +2914,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
return data()->AsAllocationSite()->IsFastLiteral();
}
+void AllocationSiteRef::SerializeBoilerplate() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsAllocationSite()->SerializeBoilerplate(broker());
+}
+
void JSObjectRef::SerializeElements() {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeElements(broker());
@@ -2880,13 +2945,13 @@ void JSObjectRef::EnsureElementsTenured() {
CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
}
-FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const {
+FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).field_index;
+ return descriptors->contents().at(descriptor_index.as_int()).field_index;
}
int MapRef::GetInObjectPropertyOffset(int i) const {
@@ -2897,16 +2962,17 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
}
-PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const {
+PropertyDetails MapRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return object()->instance_descriptors().GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).details;
+ return descriptors->contents().at(descriptor_index.as_int()).details;
}
-NameRef MapRef::GetPropertyKey(int descriptor_index) const {
+NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2916,7 +2982,8 @@ NameRef MapRef::GetPropertyKey(int descriptor_index) const {
broker()->isolate()));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return NameRef(broker(), descriptors->contents().at(descriptor_index).key);
+ return NameRef(broker(),
+ descriptors->contents().at(descriptor_index.as_int()).key);
}
bool MapRef::IsFixedCowArrayMap() const {
@@ -2926,10 +2993,10 @@ bool MapRef::IsFixedCowArrayMap() const {
}
bool MapRef::IsPrimitiveMap() const {
- return instance_type() <= LAST_PRIMITIVE_TYPE;
+ return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
-MapRef MapRef::FindFieldOwner(int descriptor_index) const {
+MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2939,11 +3006,12 @@ MapRef MapRef::FindFieldOwner(int descriptor_index) const {
return MapRef(broker(), owner);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return MapRef(broker(),
- descriptors->contents().at(descriptor_index).field_owner);
+ return MapRef(
+ broker(),
+ descriptors->contents().at(descriptor_index.as_int()).field_owner);
}
-ObjectRef MapRef::GetFieldType(int descriptor_index) const {
+ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -2953,18 +3021,21 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const {
return ObjectRef(broker(), field_type);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return ObjectRef(broker(),
- descriptors->contents().at(descriptor_index).field_type);
+ return ObjectRef(
+ broker(),
+ descriptors->contents().at(descriptor_index.as_int()).field_type);
}
-bool MapRef::IsUnboxedDoubleField(int descriptor_index) const {
+bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return object()->IsUnboxedDoubleField(
FieldIndex::ForDescriptor(*object(), descriptor_index));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
- return descriptors->contents().at(descriptor_index).is_unboxed_double_field;
+ return descriptors->contents()
+ .at(descriptor_index.as_int())
+ .is_unboxed_double_field;
}
uint16_t StringRef::GetFirstChar() {
@@ -3074,11 +3145,6 @@ Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
}
-bool BytecodeArrayRef::IsSerializedForCompilation() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) return true;
- return data()->AsBytecodeArray()->IsSerializedForCompilation();
-}
-
void BytecodeArrayRef::SerializeForCompilation() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
data()->AsBytecodeArray()->SerializeForCompilation(broker());
@@ -3191,6 +3257,8 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
+BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
+
BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
@@ -3345,7 +3413,7 @@ BIMODAL_ACCESSOR_C(String, int, length)
BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
-ObjectRef MapRef::GetStrongValue(int descriptor_index) const {
+ObjectRef MapRef::GetStrongValue(InternalIndex descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
return ObjectRef(broker(),
@@ -3376,12 +3444,12 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
return base::nullopt;
}
-void* JSTypedArrayRef::external_pointer() const {
+void* JSTypedArrayRef::data_ptr() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object()->external_pointer();
+ return object()->DataPtr();
}
- return data()->AsJSTypedArray()->external_pointer();
+ return data()->AsJSTypedArray()->data_ptr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -3774,12 +3842,37 @@ ObjectRef JSRegExpRef::source() const {
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
}
-Handle<Object> ObjectRef::object() const { return data_->object(); }
+void JSRegExpRef::SerializeAsRegExpBoilerplate() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
+}
+Handle<Object> ObjectRef::object() const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
+ return data_->object();
+}
+
+#ifdef DEBUG
#define DEF_OBJECT_GETTER(T) \
Handle<T> T##Ref::object() const { \
+ if (broker()->mode() == JSHeapBroker::kSerialized && \
+ data_->used_status == ObjectData::Usage::kUnused) { \
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \
+ } \
return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
}
+#else
+#define DEF_OBJECT_GETTER(T) \
+ Handle<T> T##Ref::object() const { \
+ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
+ }
+#endif // DEBUG
+
HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
@@ -3791,7 +3884,12 @@ ObjectData* ObjectRef::data() const {
CHECK_NE(data_->kind(), kSerializedHeapObject);
return data_;
case JSHeapBroker::kSerializing:
+ CHECK_NE(data_->kind(), kUnserializedHeapObject);
+ return data_;
case JSHeapBroker::kSerialized:
+#ifdef DEBUG
+ data_->used_status = ObjectData::Usage::kDataUsed;
+#endif // DEBUG
CHECK_NE(data_->kind(), kUnserializedHeapObject);
return data_;
case JSHeapBroker::kRetired:
@@ -3857,60 +3955,50 @@ bool JSFunctionRef::serialized() const {
return data()->AsJSFunction()->serialized();
}
-bool JSFunctionRef::IsSerializedForCompilation() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
- return handle(object()->shared(), broker()->isolate())->HasBytecodeArray();
- }
-
- // We get a crash if we try to access the shared() getter without
- // checking for `serialized` first. Also it's possible to have a
- // JSFunctionRef without a feedback vector.
- return serialized() && has_feedback_vector() &&
- shared().IsSerializedForCompilation(feedback_vector());
-}
-
JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
- ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot,
+ TemplateObjectDescriptionRef description, FeedbackSource const& source,
SerializationPolicy policy) {
- // Look in the feedback vector for the array. A Smi indicates that it's
- // not yet cached here.
- ObjectRef candidate = vector.get(slot);
- if (!candidate.IsSmi()) {
- return candidate.AsJSArray();
+ // First, see if we have processed feedback from the vector, respecting
+ // the serialization policy.
+ ProcessedFeedback const& feedback =
+ policy == SerializationPolicy::kSerializeIfNeeded
+ ? broker()->ProcessFeedbackForTemplateObject(source)
+ : broker()->GetFeedbackForTemplateObject(source);
+
+ if (!feedback.IsInsufficient()) {
+ return feedback.AsTemplateObject().value();
}
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
- Handle<TemplateObjectDescription> tod =
- Handle<TemplateObjectDescription>::cast(description.object());
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
- broker()->isolate(), broker()->target_native_context().object(),
- tod, object(), slot.ToInt());
+ isolate(), broker()->target_native_context().object(),
+ description.object(), object(), source.slot.ToInt());
return JSArrayRef(broker(), template_object);
}
- JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+ JSArrayData* array =
+ data()->AsSharedFunctionInfo()->GetTemplateObject(source.slot);
if (array != nullptr) return JSArrayRef(broker(), array);
CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
CHECK(broker()->SerializingAllowed());
- Handle<TemplateObjectDescription> tod =
- Handle<TemplateObjectDescription>::cast(description.object());
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
- broker()->isolate(), broker()->target_native_context().object(), tod,
- object(), slot.ToInt());
+ broker()->isolate(), broker()->target_native_context().object(),
+ description.object(), object(), source.slot.ToInt());
array = broker()->GetOrCreateData(template_object)->AsJSArray();
- data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+ data()->AsSharedFunctionInfo()->SetTemplateObject(source.slot, array);
return JSArrayRef(broker(), array);
}
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ CHECK(HasBytecodeArray());
data()->AsSharedFunctionInfo()->SetSerializedForCompilation(broker(),
feedback);
}
@@ -3937,7 +4025,7 @@ SharedFunctionInfoRef::function_template_info() const {
bool SharedFunctionInfoRef::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
- if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+ if (broker()->mode() == JSHeapBroker::kDisabled) return HasBytecodeArray();
return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
}
@@ -3953,19 +4041,19 @@ void MapRef::SerializeOwnDescriptors() {
data()->AsMap()->SerializeOwnDescriptors(broker());
}
-void MapRef::SerializeOwnDescriptor(int descriptor_index) {
+void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
}
-bool MapRef::serialized_own_descriptor(int descriptor_index) const {
- CHECK_LT(descriptor_index, NumberOfOwnDescriptors());
+bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (broker()->mode() == JSHeapBroker::kDisabled) return true;
DescriptorArrayData* desc_array_data =
data()->AsMap()->instance_descriptors();
if (!desc_array_data) return false;
- return desc_array_data->contents().find(descriptor_index) !=
+ return desc_array_data->contents().find(descriptor_index.as_int()) !=
desc_array_data->contents().end();
}
@@ -4027,14 +4115,14 @@ void FunctionTemplateInfoRef::SerializeCallCode() {
data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
-base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
+base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
NameRef const& name, SerializationPolicy policy) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
return GetPropertyCellFromHeap(broker(), name.object());
}
PropertyCellData* property_cell_data =
- data()->AsJSGlobalProxy()->GetPropertyCell(broker(),
- name.data()->AsName(), policy);
+ data()->AsJSGlobalObject()->GetPropertyCell(
+ broker(), name.data()->AsName(), policy);
if (property_cell_data == nullptr) return base::nullopt;
return PropertyCellRef(broker(), property_cell_data);
}
@@ -4115,7 +4203,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4265,6 +4352,7 @@ void JSHeapBroker::SetFeedback(FeedbackSource const& source,
}
bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const {
+ DCHECK(source.IsValid());
return feedback_.find(source) != feedback_.end();
}
@@ -4315,7 +4403,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
MapHandles maps;
nexus.ExtractMaps(&maps);
- DCHECK_NE(nexus.ic_state(), PREMONOMORPHIC);
if (!maps.empty()) {
maps = GetRelevantReceiverMaps(isolate(), maps);
if (maps.empty()) return *new (zone()) InsufficientFeedback(kind);
@@ -4424,6 +4511,47 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
return *new (zone()) InstanceOfFeedback(optional_constructor, nexus.kind());
}
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ AllocationSiteRef site(this, handle(object, isolate()));
+ if (site.IsFastLiteral()) {
+ site.SerializeBoilerplate();
+ }
+
+ return *new (zone()) LiteralFeedback(site, nexus.kind());
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ JSRegExpRef regexp(this, handle(object, isolate()));
+ regexp.SerializeAsRegExpBoilerplate();
+ return *new (zone()) RegExpLiteralFeedback(regexp, nexus.kind());
+}
+
+ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ FeedbackNexus nexus(source.vector, source.slot);
+ HeapObject object;
+ if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) {
+ return *new (zone()) InsufficientFeedback(nexus.kind());
+ }
+
+ JSArrayRef array(this, handle(object, isolate()));
+ return *new (zone()) TemplateObjectFeedback(array, nexus.kind());
+}
+
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot);
@@ -4495,6 +4623,50 @@ ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
: ProcessFeedbackForGlobalAccess(source);
}
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining
+ ? GetFeedback(source)
+ : ProcessFeedbackForArrayOrObjectLiteral(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining ? GetFeedback(source)
+ : ProcessFeedbackForRegExpLiteral(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ return FLAG_concurrent_inlining ? GetFeedback(source)
+ : ProcessFeedbackForTemplateObject(source);
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback =
+ ReadFeedbackForArrayOrObjectLiteral(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
+ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject(
+ FeedbackSource const& source) {
+ if (HasFeedback(source)) return GetFeedback(source);
+ ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source);
+ SetFeedback(source, &feedback);
+ return feedback;
+}
+
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
@@ -4650,9 +4822,10 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
- if (ref.broker()->mode() == JSHeapBroker::kDisabled) {
- // If the broker is disabled we cannot be in a background thread so it's
- // safe to read the heap.
+ if (ref.broker()->mode() == JSHeapBroker::kDisabled ||
+ !FLAG_concurrent_recompilation) {
+ // We cannot be in a background thread so it's safe to read the heap.
+ AllowHandleDereference allow_handle_dereference;
return os << ref.data() << " {" << ref.object() << "}";
} else {
return os << ref.data();
@@ -4734,6 +4907,21 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const {
return *static_cast<NamedAccessFeedback const*>(this);
}
+LiteralFeedback const& ProcessedFeedback::AsLiteral() const {
+ CHECK_EQ(kLiteral, kind());
+ return *static_cast<LiteralFeedback const*>(this);
+}
+
+RegExpLiteralFeedback const& ProcessedFeedback::AsRegExpLiteral() const {
+ CHECK_EQ(kRegExpLiteral, kind());
+ return *static_cast<RegExpLiteralFeedback const*>(this);
+}
+
+TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
+ CHECK_EQ(kTemplateObject, kind());
+ return *static_cast<TemplateObjectFeedback const*>(this);
+}
+
BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
bool analyze_liveness, SerializationPolicy policy) {
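The DEBUG-only additions above give every ObjectData a Usage marker (kUnused, kOnlyIdentityUsed, kDataUsed), bump it to identity-only in ObjectRef::equals() and object(), promote it to data-used in ObjectRef::data(), and have Retire() call PrintRefsAnalysis() to report the totals. A small self-contained sketch of that bookkeeping idea, using simplified hypothetical types:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical miniature of the usage bookkeeping added above (debug-only in
// the real broker): every tracked entry records whether only its identity was
// touched or whether its serialized data was actually read.
enum class Usage { kUnused, kOnlyIdentityUsed, kDataUsed };

struct TrackedRef {
  std::string name;
  int payload = 0;
  mutable Usage used = Usage::kUnused;

  bool SameIdentityAs(const TrackedRef& other) const {
    if (used == Usage::kUnused) used = Usage::kOnlyIdentityUsed;  // like ObjectRef::equals
    return this == &other;
  }
  int Data() const {
    used = Usage::kDataUsed;  // like ObjectRef::data()
    return payload;
  }
};

// Analogue of PrintRefsAnalysis: count entries per usage class at retirement.
void PrintUsage(const std::vector<TrackedRef>& refs) {
  size_t unused = 0, identity = 0, data = 0;
  for (const TrackedRef& r : refs) {
    switch (r.used) {
      case Usage::kUnused: ++unused; break;
      case Usage::kOnlyIdentityUsed: ++identity; break;
      case Usage::kDataUsed: ++data; break;
    }
  }
  std::cout << "data used: " << data << "; only identity used: " << identity
            << "; unused: " << unused << "\n";
}

int main() {
  std::vector<TrackedRef> refs = {{"map"}, {"string"}, {"cell"}};
  refs[0].Data();                   // counts as data used
  refs[1].SameIdentityAs(refs[2]);  // marks refs[1] as identity-only used
  PrintUsage(refs);                 // data used: 1; only identity used: 1; unused: 1
}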
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 8c2622bf48..c9667a2fed 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -34,6 +34,12 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
broker->Trace() << x << '\n'; \
} while (false)
+#define TRACE_BROKER_MEMORY(broker, x) \
+ do { \
+ if (broker->tracing_enabled() && FLAG_trace_heap_broker_memory) \
+ broker->Trace() << x << std::endl; \
+ } while (false)
+
#define TRACE_BROKER_MISSING(broker, x) \
do { \
if (broker->tracing_enabled()) \
@@ -86,6 +92,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void Retire();
bool SerializingAllowed() const;
+#ifdef DEBUG
+ void PrintRefsAnalysis() const;
+#endif // DEBUG
+
// Returns nullptr iff handle unknown.
ObjectData* GetData(Handle<Object>) const;
// Never returns nullptr.
@@ -125,6 +135,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource const& source);
ProcessedFeedback const& GetFeedbackForInstanceOf(
FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& GetFeedbackForTemplateObject(
+ FeedbackSource const& source);
ProcessedFeedback const& GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
@@ -143,6 +159,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ProcessFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
+ ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ProcessFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ProcessFeedbackForTemplateObject(
+ FeedbackSource const& source);
bool FeedbackIsInsufficient(FeedbackSource const& source) const;
@@ -157,7 +179,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
StringRef GetTypedArrayStringTag(ElementsKind kind);
- std::ostream& Trace();
+ std::ostream& Trace() const;
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -182,6 +204,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
+ ProcessedFeedback const& ReadFeedbackForArrayOrObjectLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ReadFeedbackForRegExpLiteral(
+ FeedbackSource const& source);
+ ProcessedFeedback const& ReadFeedbackForTemplateObject(
+ FeedbackSource const& source);
void InitializeRefsMap();
void CollectArrayAndObjectPrototypes();
@@ -199,7 +227,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
- StdoutStream trace_out_;
+ mutable StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
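The header above declares the new feedback API in three layers: GetFeedbackForX, ProcessFeedbackForX and ReadFeedbackForX. As the .cc hunks show, Get either returns already-collected feedback (under concurrent inlining) or processes on demand, Process memoizes the result keyed by the feedback source, and only Read inspects the feedback vector. A stripped-down sketch of that layering with hypothetical stand-in types:

#include <iostream>
#include <map>
#include <string>

// Stand-ins for FeedbackSource and ProcessedFeedback; the real broker keys on
// FeedbackSource and zone-allocates the ProcessedFeedback objects.
struct FeedbackSourceStub {
  int slot;
  bool operator<(const FeedbackSourceStub& o) const { return slot < o.slot; }
};
struct ProcessedFeedbackStub {
  std::string summary;
};

class BrokerStub {
 public:
  explicit BrokerStub(bool concurrent_inlining)
      : concurrent_inlining_(concurrent_inlining) {}

  // GetFeedbackForX: under concurrent inlining all feedback is assumed to have
  // been processed during serialization; otherwise process it lazily.
  const ProcessedFeedbackStub& GetFeedbackForTemplateObject(FeedbackSourceStub s) {
    return concurrent_inlining_ ? cache_.at(s) : ProcessFeedbackForTemplateObject(s);
  }

  // ProcessFeedbackForX: memoize the result of ReadFeedbackForX.
  const ProcessedFeedbackStub& ProcessFeedbackForTemplateObject(FeedbackSourceStub s) {
    auto it = cache_.find(s);
    if (it != cache_.end()) return it->second;
    return cache_.emplace(s, ReadFeedbackForTemplateObject(s)).first->second;
  }

 private:
  // ReadFeedbackForX: the only layer that looks at the feedback itself.
  ProcessedFeedbackStub ReadFeedbackForTemplateObject(FeedbackSourceStub s) {
    return {"template object feedback for slot " + std::to_string(s.slot)};
  }

  bool concurrent_inlining_;
  std::map<FeedbackSourceStub, ProcessedFeedbackStub> cache_;
};

int main() {
  BrokerStub broker(/*concurrent_inlining=*/false);
  std::cout << broker.GetFeedbackForTemplateObject({1}).summary << "\n";
  std::cout << broker.GetFeedbackForTemplateObject({1}).summary << "\n";  // served from cache
}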
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index bf4b79bf92..13bd6a1282 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -12,6 +12,7 @@
#include "src/heap/factory-inl.h"
#include "src/objects/map.h"
#include "src/objects/scope-info.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -27,172 +28,145 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- if (!FLAG_concurrent_inlining) {
- ObjectRef object(broker(), HeapConstantOf(node->op()));
- if (object.IsJSFunction()) object.AsJSFunction().Serialize();
- if (object.IsJSObject()) {
- object.AsJSObject().SerializeObjectCreateMap();
- }
- if (object.IsSourceTextModule()) {
- object.AsSourceTextModule().Serialize();
- }
+ ObjectRef object(broker(), HeapConstantOf(node->op()));
+ if (object.IsJSFunction()) object.AsJSFunction().Serialize();
+ if (object.IsJSObject()) {
+ object.AsJSObject().SerializeObjectCreateMap();
+ }
+ if (object.IsSourceTextModule()) {
+ object.AsSourceTextModule().Serialize();
}
break;
}
case IrOpcode::kJSCreateArray: {
- if (!FLAG_concurrent_inlining) {
- CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
- Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
- }
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
break;
}
case IrOpcode::kJSCreateArguments: {
- if (!FLAG_concurrent_inlining) {
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- SharedFunctionInfoRef shared(
- broker(), state_info.shared_info().ToHandleChecked());
- }
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ SharedFunctionInfoRef shared(broker(),
+ state_info.shared_info().ToHandleChecked());
break;
}
case IrOpcode::kJSCreateBlockContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateBoundFunction: {
- if (!FLAG_concurrent_inlining) {
- CreateBoundFunctionParameters const& p =
- CreateBoundFunctionParametersOf(node->op());
- MapRef(broker(), p.map());
- }
+ CreateBoundFunctionParameters const& p =
+ CreateBoundFunctionParametersOf(node->op());
+ MapRef(broker(), p.map());
break;
}
case IrOpcode::kJSCreateCatchContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateClosure: {
- if (!FLAG_concurrent_inlining) {
- CreateClosureParameters const& p =
- CreateClosureParametersOf(node->op());
- SharedFunctionInfoRef(broker(), p.shared_info());
- FeedbackCellRef(broker(), p.feedback_cell());
- HeapObjectRef(broker(), p.code());
- }
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ SharedFunctionInfoRef(broker(), p.shared_info());
+ FeedbackCellRef(broker(), p.feedback_cell());
+ HeapObjectRef(broker(), p.code());
break;
}
case IrOpcode::kJSCreateEmptyLiteralArray: {
- if (!FLAG_concurrent_inlining) {
- FeedbackParameter const& p = FeedbackParameterOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateFunctionContext: {
- if (!FLAG_concurrent_inlining) {
- CreateFunctionContextParameters const& p =
- CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef(broker(), p.scope_info());
- }
+ CreateFunctionContextParameters const& p =
+ CreateFunctionContextParametersOf(node->op());
+ ScopeInfoRef(broker(), p.scope_info());
break;
}
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject: {
- if (!FLAG_concurrent_inlining) {
- CreateLiteralParameters const& p =
- CreateLiteralParametersOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateLiteralRegExp: {
- if (!FLAG_concurrent_inlining) {
- CreateLiteralParameters const& p =
- CreateLiteralParametersOf(node->op());
- FeedbackVectorRef(broker(), p.feedback().vector).Serialize();
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForRegExpLiteral(p.feedback());
}
break;
}
+ case IrOpcode::kJSGetTemplateObject: {
+ GetTemplateObjectParameters const& p =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), p.shared());
+ TemplateObjectDescriptionRef description(broker(), p.description());
+ shared.GetTemplateObject(description, p.feedback(),
+ SerializationPolicy::kSerializeIfNeeded);
+ break;
+ }
case IrOpcode::kJSCreateWithContext: {
- if (!FLAG_concurrent_inlining) {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
- }
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSLoadNamed: {
- if (!FLAG_concurrent_inlining) {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
- if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
- AccessMode::kLoad, name);
- }
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef name(broker(), p.name());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
+ AccessMode::kLoad, name);
}
break;
}
case IrOpcode::kJSStoreNamed: {
- if (!FLAG_concurrent_inlining) {
- NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
- }
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef name(broker(), p.name());
break;
}
case IrOpcode::kStoreField:
case IrOpcode::kLoadField: {
- if (!FLAG_concurrent_inlining) {
- FieldAccess access = FieldAccessOf(node->op());
- Handle<Map> map_handle;
- if (access.map.ToHandle(&map_handle)) {
- MapRef(broker(), map_handle);
- }
- Handle<Name> name_handle;
- if (access.name.ToHandle(&name_handle)) {
- NameRef(broker(), name_handle);
- }
+ FieldAccess access = FieldAccessOf(node->op());
+ Handle<Map> map_handle;
+ if (access.map.ToHandle(&map_handle)) {
+ MapRef(broker(), map_handle);
+ }
+ Handle<Name> name_handle;
+ if (access.name.ToHandle(&name_handle)) {
+ NameRef(broker(), name_handle);
}
break;
}
case IrOpcode::kMapGuard: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kCheckMaps: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(node->op()).maps();
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kCompareMaps: {
- if (!FLAG_concurrent_inlining) {
- ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
- for (Handle<Map> map : maps) {
- MapRef(broker(), map);
- }
+ ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
+ for (Handle<Map> map : maps) {
+ MapRef(broker(), map);
}
break;
}
case IrOpcode::kJSLoadProperty: {
- if (!FLAG_concurrent_inlining) {
- PropertyAccess const& p = PropertyAccessOf(node->op());
- AccessMode access_mode = AccessMode::kLoad;
- if (p.feedback().IsValid()) {
- broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
- base::nullopt);
- }
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ AccessMode access_mode = AccessMode::kLoad;
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
+ base::nullopt);
}
break;
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index ae271b3af9..cc3f321d6b 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -22,9 +22,35 @@ namespace compiler {
} while (false)
namespace {
-bool IsSmall(BytecodeArrayRef bytecode) {
+bool IsSmall(BytecodeArrayRef const& bytecode) {
return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
}
+
+bool CanConsiderForInlining(JSHeapBroker* broker,
+ SharedFunctionInfoRef const& shared,
+ FeedbackVectorRef const& feedback_vector) {
+ if (!shared.IsInlineable()) return false;
+ DCHECK(shared.HasBytecodeArray());
+ if (!shared.IsSerializedForCompilation(feedback_vector)) {
+ TRACE_BROKER_MISSING(
+ broker, "data for " << shared << " (not serialized for compilation)");
+ return false;
+ }
+ return true;
+}
+
+bool CanConsiderForInlining(JSHeapBroker* broker,
+ JSFunctionRef const& function) {
+ if (!function.has_feedback_vector()) return false;
+ if (!function.serialized()) {
+ TRACE_BROKER_MISSING(
+ broker, "data for " << function << " (cannot consider for inlining)");
+ return false;
+ }
+ return CanConsiderForInlining(broker, function.shared(),
+ function.feedback_vector());
+}
+
} // namespace
JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
@@ -38,11 +64,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
out.functions[0] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[0].value();
- if (function.IsSerializedForCompilation()) {
+ if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
+ out.num_functions = 1;
+ return out;
}
- out.num_functions = 1;
- return out;
}
if (m.IsPhi()) {
int const value_input_count = m.node()->op()->ValueInputCount();
@@ -59,7 +85,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
- if (function.IsSerializedForCompilation()) {
+ if (CanConsiderForInlining(broker(), function)) {
out.bytecode[n] = function.shared().GetBytecodeArray();
}
}
@@ -67,11 +93,14 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
return out;
}
if (m.IsJSCreateClosure()) {
- CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
DCHECK(!out.functions[0].has_value());
- out.shared_info = SharedFunctionInfoRef(broker(), p.shared_info());
- SharedFunctionInfoRef shared_info = out.shared_info.value();
- if (shared_info.HasBytecodeArray()) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
+ FeedbackCellRef feedback_cell(broker(), p.feedback_cell());
+ SharedFunctionInfoRef shared_info(broker(), p.shared_info());
+ out.shared_info = shared_info;
+ if (feedback_cell.value().IsFeedbackVector() &&
+ CanConsiderForInlining(broker(), shared_info,
+ feedback_cell.value().AsFeedbackVector())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -135,7 +164,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
SharedFunctionInfoRef shared = candidate.functions[i].has_value()
? candidate.functions[i].value().shared()
: candidate.shared_info.value();
- candidate.can_inline_function[i] = shared.IsInlineable();
+ candidate.can_inline_function[i] = candidate.bytecode[i].has_value();
+ CHECK_IMPLIES(candidate.can_inline_function[i], shared.IsInlineable());
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in
// cases where f() is a small dispatch function that calls the appropriate
@@ -151,14 +181,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
node->id(), node->op()->mnemonic());
candidate.can_inline_function[i] = false;
}
- // A function reaching this point should always have its bytecode
- // serialized.
- BytecodeArrayRef bytecode = candidate.bytecode[i].value();
if (candidate.can_inline_function[i]) {
can_inline_candidate = true;
+ BytecodeArrayRef bytecode = candidate.bytecode[i].value();
candidate.total_size += bytecode.length();
+ candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
- candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
if (!can_inline_candidate) return NoChange();
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 51179f1956..6c071438cc 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -321,7 +321,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// TODO(turbofan): We might consider to eagerly create the feedback vector
// in such a case (in {DetermineCallContext} below) eventually.
- FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
+ FeedbackCellRef cell(broker(), p.feedback_cell());
if (!cell.value().IsFeedbackVector()) return base::nullopt;
return SharedFunctionInfoRef(broker(), p.shared_info());
@@ -413,11 +413,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Node* exception_target = nullptr;
NodeProperties::IsExceptionalCall(node, &exception_target);
- // JSInliningHeuristic has already filtered candidates without a
- // BytecodeArray by calling SharedFunctionInfoRef::IsInlineable. For the ones
- // passing the IsInlineable check, The broker holds a reference to the
- // bytecode array, which prevents it from getting flushed.
- // Therefore, the following check should always hold true.
+ // JSInliningHeuristic has already filtered candidates without a BytecodeArray
+ // by calling SharedFunctionInfoRef::IsInlineable. For the ones passing the
+ // IsInlineable check, the broker holds a reference to the bytecode array,
+ // which prevents it from getting flushed. Therefore, the following check
+ // should always hold true.
CHECK(shared_info->is_compiled());
if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) {
@@ -428,17 +428,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
TRACE("Inlining " << *shared_info << " into " << outer_shared_info
<< ((exception_target != nullptr) ? " (inside try-block)"
: ""));
- // Determine the targets feedback vector and its context.
+ // Determine the target's feedback vector and its context.
Node* context;
FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
-
- if (FLAG_concurrent_inlining &&
- !shared_info->IsSerializedForCompilation(feedback_vector)) {
- // TODO(neis): Should this be a broker message?
- TRACE("Missed opportunity to inline a function ("
- << *shared_info << " with " << feedback_vector << ")");
- return NoChange();
- }
+ CHECK(shared_info->IsSerializedForCompilation(feedback_vector));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 9f950c808c..80c620034b 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -784,12 +784,15 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key) {
base::Optional<PropertyCellRef> cell =
- native_context().global_proxy_object().GetPropertyCell(name);
+ native_context().global_object().GetPropertyCell(name);
return cell.has_value() ? ReduceGlobalAccess(node, receiver, value, name,
access_mode, key, *cell)
: NoChange();
}
+// TODO(neis): Try to merge this with ReduceNamedAccess by introducing a new
+// PropertyAccessInfo kind for global accesses and using the existing mechanism
+// for building loads/stores.
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* receiver, Node* value, NameRef const& name,
AccessMode access_mode, Node* key, PropertyCellRef const& property_cell) {
@@ -838,15 +841,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = BuildCheckEqualsName(name, key, effect, control);
}
- // Check if we have a {receiver} to validate. If so, we need to check that
- // the {receiver} is actually the JSGlobalProxy for the native context that
- // we are specializing to.
+ // If we have a {receiver} to validate, we do so by checking that its map is
+ // the (target) global proxy's map. This guarantees that in fact the receiver
+ // is the global proxy.
if (receiver != nullptr) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
- jsgraph()->HeapConstant(global_proxy()));
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kReceiverNotAGlobalProxy),
- check, effect, control);
+ simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(
+ HeapObjectRef(broker(), global_proxy()).map().object())),
+ receiver, effect, control);
}
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
@@ -1050,28 +1054,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
}
-void JSNativeContextSpecialization::FilterMapsAndGetPropertyAccessInfos(
- NamedAccessFeedback const& feedback, AccessMode access_mode, Node* receiver,
- Node* effect, ZoneVector<PropertyAccessInfo>* access_infos) {
- ZoneVector<Handle<Map>> receiver_maps(zone());
-
- // Either infer maps from the graph or use the feedback.
- if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
- receiver_maps = feedback.maps();
- }
- RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
-
- for (Handle<Map> map_handle : receiver_maps) {
- MapRef map(broker(), map_handle);
- if (map.is_deprecated()) continue;
- PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map, feedback.name(), access_mode, dependencies(),
- FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
- : SerializationPolicy::kSerializeIfNeeded);
- access_infos->push_back(access_info);
- }
-}
-
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key) {
@@ -1081,36 +1063,54 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreNamedOwn ||
node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty ||
- node->opcode() == IrOpcode::kJSGetIterator);
+ node->opcode() == IrOpcode::kJSHasProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- ZoneVector<PropertyAccessInfo> access_infos(zone());
- FilterMapsAndGetPropertyAccessInfos(feedback, access_mode, receiver, effect,
- &access_infos_for_feedback);
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
- if (!access_info_factory.FinalizePropertyAccessInfos(
- access_infos_for_feedback, access_mode, &access_infos)) {
- return NoChange();
+ // Either infer maps from the graph or use the feedback.
+ ZoneVector<Handle<Map>> receiver_maps(zone());
+ if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
+ receiver_maps = feedback.maps();
}
+ RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
- // Check if we have an access o.x or o.x=v where o is the current
- // native contexts' global proxy, and turn that into a direct access
- // to the current native context's global object instead.
- if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) {
- MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]);
- if (receiver_map.IsMapOfTargetGlobalProxy()) {
+ // Check if we have an access o.x or o.x=v where o is the target native
+ // contexts' global proxy, and turn that into a direct access to the
+ // corresponding global object instead.
+ if (receiver_maps.size() == 1) {
+ MapRef receiver_map(broker(), receiver_maps[0]);
+ if (receiver_map.equals(
+ broker()->target_native_context().global_proxy_object().map()) &&
+ !broker()->target_native_context().global_object().IsDetached()) {
return ReduceGlobalAccess(node, receiver, value, feedback.name(),
access_mode, key);
}
}
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ {
+ ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
+ for (Handle<Map> map_handle : receiver_maps) {
+ MapRef map(broker(), map_handle);
+ if (map.is_deprecated()) continue;
+ PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
+ map, feedback.name(), access_mode, dependencies(),
+ FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
+ : SerializationPolicy::kSerializeIfNeeded);
+ access_infos_for_feedback.push_back(access_info);
+ }
+
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ graph()->zone());
+ if (!access_info_factory.FinalizePropertyAccessInfos(
+ access_infos_for_feedback, access_mode, &access_infos)) {
+ return NoChange();
+ }
+ }
+
// Ensure that {key} matches the specified name (if {key} is given).
if (key != nullptr) {
effect = BuildCheckEqualsName(feedback.name(), key, effect, control);
@@ -1332,24 +1332,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
return Replace(value);
}
-Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
- Node* node, Node* value, FeedbackSource const& source, NameRef const& name,
- AccessMode access_mode) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
- node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn);
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
-
- // Optimize accesses to the current native context's global proxy.
- HeapObjectMatcher m(receiver);
- if (m.HasValue() &&
- m.Ref(broker()).equals(native_context().global_proxy_object())) {
- return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
- }
-
- return ReducePropertyAccess(node, nullptr, name, value, source, access_mode);
-}
-
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
@@ -1388,18 +1370,134 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(),
- FeedbackSource(p.feedback()), name,
- AccessMode::kLoad);
+ return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode());
- PropertyAccess const& p = PropertyAccessOf(node->op());
- NameRef name(broker(), factory()->iterator_symbol());
+ GetIteratorParameters const& p = GetIteratorParametersOf(node->op());
- return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
- FeedbackSource(p.feedback()), AccessMode::kLoad);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* iterator_exception_node = nullptr;
+ Node* if_exception_merge = nullptr;
+ Node* if_exception_effect_phi = nullptr;
+ Node* if_exception_phi = nullptr;
+ bool has_exception_node =
+ NodeProperties::IsExceptionalCall(node, &iterator_exception_node);
+ if (has_exception_node) {
+ // If there exists an IfException node for the current {node}, we need
+ // exception handling for all the desugared nodes. Create a combination
+ // of Merge+Phi+EffectPhi nodes that consumes the exception paths from
+ // from all the desugared nodes including the original exception node.
+ // Usages of the original exception node are then rewired to the newly
+ // created combination of Merge+Phi+EffectPhi. Here, use dead_node as a
+ // placeholder for the original exception node until its uses are rewired.
+
+ Node* dead_node = jsgraph()->Dead();
+ if_exception_merge = graph()->NewNode(common()->Merge(1), dead_node);
+ if_exception_effect_phi =
+ graph()->NewNode(common()->EffectPhi(1), dead_node, if_exception_merge);
+ if_exception_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 1),
+ dead_node, if_exception_merge);
+ ReplaceWithValue(iterator_exception_node, if_exception_phi,
+ if_exception_effect_phi, if_exception_merge);
+ if_exception_merge->ReplaceInput(0, iterator_exception_node);
+ if_exception_effect_phi->ReplaceInput(0, iterator_exception_node);
+ if_exception_phi->ReplaceInput(0, iterator_exception_node);
+ }
+
+ // Load iterator property operator
+ Handle<Name> iterator_symbol = factory()->iterator_symbol();
+ const Operator* load_op =
+ javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
+
+ // Lazy deopt of the load iterator property
+ Node* call_slot = jsgraph()->SmiConstant(p.callFeedback().slot.ToInt());
+ Node* call_feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
+ Node* lazy_deopt_parameters[] = {receiver, call_slot, call_feedback};
+ Node* lazy_deopt_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kGetIteratorWithFeedbackLazyDeoptContinuation,
+ context, lazy_deopt_parameters, arraysize(lazy_deopt_parameters),
+ frame_state, ContinuationFrameStateMode::LAZY);
+ Node* load_property = graph()->NewNode(
+ load_op, receiver, context, lazy_deopt_frame_state, effect, control);
+ effect = load_property;
+ control = load_property;
+
+ // Handle exception path for the load named property
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+
+ // Eager deopt of call iterator property
+ Node* parameters[] = {receiver, load_property, call_slot, call_feedback};
+ Node* eager_deopt_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kCallIteratorWithFeedback, context, parameters,
+ arraysize(parameters), frame_state, ContinuationFrameStateMode::EAGER);
+ Node* deopt_checkpoint = graph()->NewNode(
+ common()->Checkpoint(), eager_deopt_frame_state, effect, control);
+ effect = deopt_checkpoint;
+
+ // Call iterator property operator
+ ProcessedFeedback const& feedback =
+ broker()->GetFeedbackForCall(p.callFeedback());
+ SpeculationMode mode = feedback.IsInsufficient()
+ ? SpeculationMode::kDisallowSpeculation
+ : feedback.AsCall().speculation_mode();
+ const Operator* call_op =
+ javascript()->Call(2, CallFrequency(), p.callFeedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, mode);
+ Node* call_property = graph()->NewNode(call_op, load_property, receiver,
+ context, frame_state, effect, control);
+ effect = call_property;
+ control = call_property;
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+
+ // Check if the call to the iterator property returns a valid JSReceiver;
+ // otherwise throw a SymbolIteratorInvalid runtime exception.
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), call_property);
+ Node* branch_node = graph()->NewNode(
+ common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck),
+ is_receiver, control);
+ {
+ // Create a version of effect and control for the false path of the branch
+ Node* effect = call_property;
+ Node* control = call_property;
+ Node* if_not_receiver = graph()->NewNode(common()->IfFalse(), branch_node);
+ control = if_not_receiver;
+ const Operator* call_runtime_op =
+ javascript()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid, 0);
+ Node* call_runtime = graph()->NewNode(call_runtime_op, context, frame_state,
+ effect, control);
+ control = call_runtime;
+ effect = call_runtime;
+ if (has_exception_node) {
+ control =
+ AppendExceptionHandling(effect, control, if_exception_merge,
+ if_exception_phi, if_exception_effect_phi);
+ }
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), call_runtime, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ Node* if_receiver = graph()->NewNode(common()->IfTrue(), branch_node);
+ ReplaceWithValue(node, call_property, effect, if_receiver);
+ return Replace(if_receiver);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
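For orientation, the ReduceJSGetIterator lowering added above implements the GetIterator protocol in graph form: load obj[Symbol.iterator] using the load feedback, call the result using the call feedback, check that the call result is a JSReceiver, and otherwise throw via Runtime::kThrowSymbolIteratorInvalid, with each step's exception edge joined into one shared Merge/Phi/EffectPhi. A rough standalone model of that protocol, using hypothetical C++ types that are unrelated to V8's graph IR:

  #include <functional>
  #include <iostream>
  #include <stdexcept>

  // Hypothetical stand-in for a JS value: a flag saying whether it is a
  // receiver (object/function) plus the callable stored at Symbol.iterator.
  struct Value {
    bool is_receiver;
    std::function<Value()> iterator_method;  // models obj[Symbol.iterator]
  };

  Value GetIterator(const Value& obj) {
    auto method = obj.iterator_method;  // LoadNamed(Symbol.iterator), load feedback
    Value iterator = method();          // Call(method, obj), call feedback
    if (!iterator.is_receiver) {        // ObjectIsReceiver branch
      throw std::runtime_error("Symbol.iterator result is not an object");
    }
    return iterator;
  }

  int main() {
    Value iterable{true, [] { return Value{true, {}}; }};
    std::cout << "got iterator: " << GetIterator(iterable).is_receiver << "\n";
  }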
@@ -1408,9 +1506,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 1);
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
- NameRef(broker(), p.name()),
- AccessMode::kStore);
+ return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value,
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
@@ -1419,9 +1516,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 1);
if (!p.feedback().IsValid()) return NoChange();
- return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
- NameRef(broker(), p.name()),
- AccessMode::kStoreInLiteral);
+ return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value,
+ FeedbackSource(p.feedback()),
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
@@ -1578,9 +1675,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// NoElementsProtector.
for (ElementAccessInfo const& access_info : access_infos) {
if (IsFastElementsKind(access_info.elements_kind())) {
- if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
- dependencies()->DependOnProtector(
- PropertyCellRef(broker(), factory()->no_elements_protector()));
+ if (!dependencies()->DependOnNoElementsProtector()) return NoChange();
break;
}
}
@@ -1819,8 +1914,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn ||
- node->opcode() == IrOpcode::kJSGetIterator);
+ node->opcode() == IrOpcode::kJSStoreNamedOwn);
DCHECK_GE(node->op()->ControlOutputCount(), 1);
ProcessedFeedback const& feedback =
@@ -2499,12 +2593,14 @@ JSNativeContextSpecialization::BuildElementAccess(
if (typed_array.has_value()) {
length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
- // Load the (known) base and external pointer for the {receiver}. The
- // {external_pointer} might be invalid if the {buffer} was detached, so
- // we need to make sure that any access is properly guarded.
+ DCHECK(!typed_array->is_on_heap());
+ // Load the (known) data pointer for the {receiver} and set {base_pointer}
+ // and {external_pointer} to values that allow generating typed element
+ // accesses from the known data pointer.
+ // The data pointer might be invalid if the {buffer} was detached,
+ // so we need to make sure that any access is properly guarded.
base_pointer = jsgraph()->ZeroConstant();
- external_pointer =
- jsgraph()->PointerConstant(typed_array->external_pointer());
+ external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
} else {
// Load the {receiver}s length.
length = effect = graph()->NewNode(
@@ -3168,6 +3264,22 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
control);
}
+Node* JSNativeContextSpecialization::AppendExceptionHandling(
+ Node* effect, Node* control, Node* merge, Node* phi, Node* effect_phi) {
+ DCHECK_EQ(effect, control);
+ int input_count = merge->InputCount() + 1;
+ Node* if_exception =
+ graph()->NewNode(common()->IfException(), effect, control);
+ merge->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(merge, common()->Merge(input_count));
+ phi->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(
+ phi, common()->Phi(MachineRepresentation::kTagged, input_count));
+ effect_phi->InsertInput(graph()->zone(), 0, if_exception);
+ NodeProperties::ChangeOp(effect_phi, common()->EffectPhi(input_count));
+ return graph()->NewNode(common()->IfSuccess(), control);
+}
+
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
ZoneVector<Handle<Map>> const& receiver_maps) {
// Check if all {receiver_maps} have one of the initial Array.prototype
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index a0707b9830..429be0bb24 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -101,10 +101,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
base::Optional<NameRef> static_name,
Node* value, FeedbackSource const& source,
AccessMode access_mode);
- Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
- FeedbackSource const& source,
- NameRef const& name,
- AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
NamedAccessFeedback const& processed,
AccessMode access_mode, Node* key = nullptr);
@@ -207,6 +203,12 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* BuildCheckEqualsName(NameRef const& name, Node* value, Node* effect,
Node* control);
+ // Attach a pair of success and exception paths to a given control path.
+ // The exception path is joined into the Merge+Phi+EffectPhi nodes, while
+ // the success path is returned.
+ Node* AppendExceptionHandling(Node* effect, Node* control, Node* merge,
+ Node* phi, Node* effect_phi);
+
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
@@ -219,11 +221,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
ElementAccessFeedback const& feedback, Node* receiver,
Node* effect) const;
- void FilterMapsAndGetPropertyAccessInfos(
- NamedAccessFeedback const& feedback, AccessMode access_mode,
- Node* receiver, Node* effect,
- ZoneVector<PropertyAccessInfo>* access_infos);
-
// Try to infer maps for the given {receiver} at the current {effect}.
bool InferReceiverMaps(Node* receiver, Node* effect,
ZoneVector<Handle<Map>>* receiver_maps) const;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index d0581b59a5..42e5f90057 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -11,6 +11,7 @@
#include "src/compiler/operator.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
@@ -284,8 +285,7 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
PropertyAccess const& PropertyAccessOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSHasProperty ||
op->opcode() == IrOpcode::kJSLoadProperty ||
- op->opcode() == IrOpcode::kJSStoreProperty ||
- op->opcode() == IrOpcode::kJSGetIterator);
+ op->opcode() == IrOpcode::kJSStoreProperty);
return OpParameter<PropertyAccess>(op);
}
@@ -473,6 +473,34 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
return OpParameter<CreateBoundFunctionParameters>(op);
}
+bool operator==(GetTemplateObjectParameters const& lhs,
+ GetTemplateObjectParameters const& rhs) {
+ return lhs.description().location() == rhs.description().location() &&
+ lhs.shared().location() == rhs.shared().location() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(GetTemplateObjectParameters const& lhs,
+ GetTemplateObjectParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(GetTemplateObjectParameters const& p) {
+ return base::hash_combine(p.description().location(), p.shared().location(),
+ FeedbackSource::Hash()(p.feedback()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ GetTemplateObjectParameters const& p) {
+ return os << Brief(*p.description()) << ", " << Brief(*p.shared());
+}
+
+const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
+ const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSGetTemplateObject);
+ return OpParameter<GetTemplateObjectParameters>(op);
+}
+
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.allocation() == rhs.allocation() &&
@@ -562,6 +590,31 @@ const CloneObjectParameters& CloneObjectParametersOf(const Operator* op) {
return OpParameter<CloneObjectParameters>(op);
}
+std::ostream& operator<<(std::ostream& os, GetIteratorParameters const& p) {
+ return os << p.loadFeedback() << ", " << p.callFeedback();
+}
+
+bool operator==(GetIteratorParameters const& lhs,
+ GetIteratorParameters const& rhs) {
+ return lhs.loadFeedback() == rhs.loadFeedback() &&
+ lhs.callFeedback() == rhs.callFeedback();
+}
+
+bool operator!=(GetIteratorParameters const& lhs,
+ GetIteratorParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+GetIteratorParameters const& GetIteratorParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSGetIterator);
+ return OpParameter<GetIteratorParameters>(op);
+}
+
+size_t hash_value(GetIteratorParameters const& p) {
+ return base::hash_combine(FeedbackSource::Hash()(p.loadFeedback()),
+ FeedbackSource::Hash()(p.callFeedback()));
+}
+
size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
std::ostream& operator<<(std::ostream& os, ForInMode mode) {
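Both new parameter classes, GetTemplateObjectParameters and GetIteratorParameters, follow the operator-parameter convention used throughout this file: value equality over all fields plus a hash_value() that combines the per-field hashes. A standalone sketch of that convention, with plain ints standing in for the V8-internal FeedbackSource (the names below are hypothetical):

  #include <cstddef>
  #include <functional>
  #include <iostream>

  // Hypothetical two-slot parameter object, mirroring the shape of
  // GetIteratorParameters (load feedback + call feedback).
  struct IteratorParams {
    int load_slot;
    int call_slot;
  };

  bool operator==(IteratorParams const& a, IteratorParams const& b) {
    return a.load_slot == b.load_slot && a.call_slot == b.call_slot;
  }

  // Boost-style combine step, the same idea as base::hash_combine.
  std::size_t hash_combine(std::size_t seed, std::size_t value) {
    return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
  }

  std::size_t hash_value(IteratorParams const& p) {
    std::size_t h = std::hash<int>{}(p.load_slot);
    return hash_combine(h, std::hash<int>{}(p.call_slot));
  }

  int main() {
    IteratorParams a{1, 2}, b{1, 2};
    std::cout << (a == b) << " " << (hash_value(a) == hash_value(b)) << "\n";  // prints: 1 1
  }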
@@ -957,9 +1010,10 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
-const Operator* JSOperatorBuilder::GetIterator(FeedbackSource const& feedback) {
- PropertyAccess access(LanguageMode::kSloppy, feedback);
- return new (zone()) Operator1<PropertyAccess>( // --
+const Operator* JSOperatorBuilder::GetIterator(
+ FeedbackSource const& load_feedback, FeedbackSource const& call_feedback) {
+ GetIteratorParameters access(load_feedback, call_feedback);
+ return new (zone()) Operator1<GetIteratorParameters>( // --
IrOpcode::kJSGetIterator, Operator::kNoProperties, // opcode
"JSGetIterator", // name
1, 1, 1, 1, 1, 2, // counts
@@ -1257,6 +1311,18 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback) {
+ GetTemplateObjectParameters parameters(description, shared, feedback);
+ return new (zone()) Operator1<GetTemplateObjectParameters>( // --
+ IrOpcode::kJSGetTemplateObject, // opcode
+ Operator::kEliminatable, // properties
+ "JSGetTemplateObject", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback,
int literal_flags) {
CloneObjectParameters parameters(feedback, literal_flags);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index f795a2f402..47b0fff05a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -409,13 +409,13 @@ class StoreGlobalParameters final {
: language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
- const FeedbackSource& feedback() const { return feedback_; }
- const Handle<Name>& name() const { return name_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+ Handle<Name> const& name() const { return name_; }
private:
- const LanguageMode language_mode_;
- const Handle<Name> name_;
- const FeedbackSource feedback_;
+ LanguageMode const language_mode_;
+ Handle<Name> const name_;
+ FeedbackSource const feedback_;
};
bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
@@ -598,6 +598,35 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
+class GetTemplateObjectParameters final {
+ public:
+ GetTemplateObjectParameters(Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared,
+ FeedbackSource const& feedback)
+ : description_(description), shared_(shared), feedback_(feedback) {}
+
+ Handle<TemplateObjectDescription> description() const { return description_; }
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+
+ private:
+ Handle<TemplateObjectDescription> const description_;
+ Handle<SharedFunctionInfo> const shared_;
+ FeedbackSource const feedback_;
+};
+
+bool operator==(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
+bool operator!=(GetTemplateObjectParameters const&,
+ GetTemplateObjectParameters const&);
+
+size_t hash_value(GetTemplateObjectParameters const&);
+
+std::ostream& operator<<(std::ostream&, GetTemplateObjectParameters const&);
+
+const GetTemplateObjectParameters& GetTemplateObjectParametersOf(
+ const Operator* op);
+
// Defines shared information for the literal that should be created. This is
// used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
// JSCreateLiteralRegExp operators.
@@ -653,6 +682,31 @@ std::ostream& operator<<(std::ostream&, CloneObjectParameters const&);
const CloneObjectParameters& CloneObjectParametersOf(const Operator* op);
+// Defines the shared information for the iterator symbol that is loaded and
+// called. This is used as a parameter by the JSGetIterator operator.
+class GetIteratorParameters final {
+ public:
+ GetIteratorParameters(const FeedbackSource& load_feedback,
+ const FeedbackSource& call_feedback)
+ : load_feedback_(load_feedback), call_feedback_(call_feedback) {}
+
+ FeedbackSource const& loadFeedback() const { return load_feedback_; }
+ FeedbackSource const& callFeedback() const { return call_feedback_; }
+
+ private:
+ FeedbackSource const load_feedback_;
+ FeedbackSource const call_feedback_;
+};
+
+bool operator==(GetIteratorParameters const&, GetIteratorParameters const&);
+bool operator!=(GetIteratorParameters const&, GetIteratorParameters const&);
+
+size_t hash_value(GetIteratorParameters const&);
+
+std::ostream& operator<<(std::ostream&, GetIteratorParameters const&);
+
+const GetIteratorParameters& GetIteratorParametersOf(const Operator* op);
+
// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
@@ -742,7 +796,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback);
const Operator* CreateArrayFromIterable();
const Operator* CreateEmptyLiteralObject();
-
const Operator* CreateLiteralObject(
Handle<ObjectBoilerplateDescription> constant,
FeedbackSource const& feedback, int literal_flags,
@@ -753,6 +806,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
FeedbackSource const& feedback,
int literal_flags);
+ const Operator* GetTemplateObject(
+ Handle<TemplateObjectDescription> description,
+ Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback);
+
const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
size_t arity, CallFrequency const& frequency = CallFrequency(),
@@ -856,7 +913,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ParseInt();
const Operator* RegExpTest();
- const Operator* GetIterator(FeedbackSource const& feedback);
+ const Operator* GetIterator(FeedbackSource const& load_feedback,
+ FeedbackSource const& call_feedback);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index e1ff928cec..9a6b367ddf 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -482,12 +482,32 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
return LoweringResult::NoChange();
}
+JSTypeHintLowering::LoweringResult
+JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op,
+ Node* receiver, Node* effect,
+ Node* control,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) const {
+ DCHECK_EQ(IrOpcode::kJSGetIterator, op->opcode());
+ // Insert soft deopt if the load feedback is invalid.
+ if (Node* node = TryBuildSoftDeopt(
+ load_slot, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
+ return LoweringResult::Exit(node);
+ }
+ // Insert soft deopt if the call feedback is invalid.
+ if (Node* node = TryBuildSoftDeopt(
+ call_slot, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
+ return LoweringResult::Exit(node);
+ }
+ return LoweringResult::NoChange();
+}
+
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
const Operator* op, Node* receiver, Node* effect, Node* control,
FeedbackSlot slot) const {
- // JSGetIterator involves a named load of the Symbol.iterator property.
- DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
- op->opcode() == IrOpcode::kJSGetIterator);
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 3e46fb2ec2..303e2f8dcf 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -134,6 +134,13 @@ class JSTypeHintLowering {
int arg_count, Node* effect,
Node* control,
FeedbackSlot slot) const;
+
+ // Potential reduction of property access and call operations.
+ LoweringResult ReduceGetIteratorOperation(const Operator* op, Node* obj,
+ Node* effect, Node* control,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot) const;
+
// Potential reduction of property access operations.
LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj,
Node* effect, Node* control,
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 8caafe6aad..035457c62b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -17,6 +17,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
+#include "src/execution/protectors.h"
#include "src/objects/js-generator.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
@@ -567,9 +568,10 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* length =
graph()->NewNode(simplified()->NumberAdd(), left_length, right_length);
- CellRef string_length_protector(broker(),
- factory()->string_length_protector());
- if (string_length_protector.value().AsSmi() == Isolate::kProtectorValid) {
+ PropertyCellRef string_length_protector(
+ broker(), factory()->string_length_protector());
+ if (string_length_protector.value().AsSmi() ==
+ Protectors::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
// generating a shorter code sequence than the version below, this
// has the additional benefit of not holding on to the lazy {frame_state}
@@ -2025,8 +2027,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
ExternalReference const ref =
ExternalReference::address_of_pending_message_obj(isolate());
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
- NodeProperties::ChangeOp(
- node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->LoadMessage());
return Changed(node);
}
@@ -2037,8 +2038,7 @@ Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
node->ReplaceInput(1, value);
- NodeProperties::ChangeOp(
- node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+ NodeProperties::ChangeOp(node, simplified()->StoreMessage());
return Changed(node);
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 4c7ee1d141..f6b747c04d 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -241,7 +241,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -437,7 +437,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
@@ -461,7 +461,7 @@ class MachineRepresentationChecker {
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
- if (Is64()) {
+ if (Is64() && !COMPRESS_POINTERS_BOOL) {
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputIsTaggedOrPointer(node, 1);
if (!is_stub_) {
@@ -1007,6 +1007,13 @@ class MachineRepresentationChecker {
return IsAnyCompressed(actual);
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
+ // TODO(tebbi): At the moment, the machine graph doesn't contain
+ // reliable information if a node is kTaggedSigned, kTaggedPointer or
+ // kTagged, and often this is context-dependent. We should at least
+ // check for obvious violations: kTaggedSigned where we expect
+ // kTaggedPointer and the other way around, but at the moment, this
+ // happens in dead code.
+ return IsAnyTagged(actual);
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kFloat32:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 11124579f6..38013d228c 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -681,7 +681,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Int64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
- if (m.IsBitcastTaggedSignedToWord()) {
+ if (m.IsBitcastTaggedToWordForTagAndSmiBits()) {
Int64Matcher n(m.node()->InputAt(0));
if (n.IsChangeCompressedToTagged()) {
DCHECK(machine()->Is64() && SmiValuesAre31Bits());
@@ -725,7 +725,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord: {
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: {
NodeMatcher m(node->InputAt(0));
if (m.IsBitcastWordToTaggedSigned()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 0355534408..b450fb60da 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -146,7 +146,8 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \
+ V(Simd128ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedToWordForTagAndSmiBits, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWord32ToCompressedSigned, Operator::kNoProperties, 1, 0, 1) \
V(BitcastCompressedSignedToWord32, Operator::kNoProperties, 1, 0, 1) \
@@ -255,6 +256,7 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Add, Operator::kCommutative, 2, 0, 1) \
V(F64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Mul, Operator::kCommutative, 2, 0, 1) \
@@ -265,11 +267,14 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Qfma, Operator::kNoProperties, 3, 0, 1) \
+ V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
@@ -283,6 +288,8 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Qfma, Operator::kNoProperties, 3, 0, 1) \
+ V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \
@@ -395,6 +402,7 @@ MachineType AtomicOpType(Operator const* op) {
V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1) \
V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1)
// The format is:
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 17db145f58..1bd806eefb 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -239,6 +239,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64ReverseBits();
const Operator* Word32ReverseBytes();
const Operator* Word64ReverseBytes();
+ const Operator* Simd128ReverseBytes();
const OptionalOperator Int32AbsWithOverflow();
const OptionalOperator Int64AbsWithOverflow();
@@ -301,8 +302,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// This operator reinterprets the bits of a tagged pointer as a word.
const Operator* BitcastTaggedToWord();
- // This operator reinterprets the bits of a Smi as a word.
- const Operator* BitcastTaggedSignedToWord();
+ // This operator reinterprets the bits of a tagged value as a word preserving
+ // non-pointer bits (all the bits that are not modified by GC):
+ // 1) smi tag
+ // 2) weak tag
+ // 3) smi payload if the tagged value is a smi.
+ // Note that it's illegal to "look" at the pointer bits of non-smi values.
+ const Operator* BitcastTaggedToWordForTagAndSmiBits();
// This operator reinterprets the bits of a tagged MaybeObject pointer as
// word.
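The comment on BitcastTaggedToWordForTagAndSmiBits above states that only the tag bits and the Smi payload may be inspected after the bitcast. A standalone illustration of why, assuming the common 31-bit Smi encoding (payload shifted left by one, low tag bit 0 for Smis and 1 for heap objects); this is plain C++ for illustration, not V8 code:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Encode a Smi under the assumed scheme: payload << 1, tag bit = 0.
    int32_t payload = 42;
    uint32_t tagged = static_cast<uint32_t>(payload) << 1;

    // After a bitcast to word, the tag bit and the Smi payload remain meaningful.
    bool is_smi = (tagged & 1) == 0;
    int32_t decoded = static_cast<int32_t>(tagged) >> 1;  // arithmetic shift restores the payload
    std::printf("is_smi=%d payload=%d\n", is_smi, decoded);

    // For a heap object the low bit would be 1 and the remaining bits form a
    // pointer that the GC may move, so they must not be inspected.
    return 0;
  }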
@@ -477,6 +483,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Splat();
const Operator* F64x2Abs();
const Operator* F64x2Neg();
+ const Operator* F64x2Sqrt();
const Operator* F64x2Add();
const Operator* F64x2Sub();
const Operator* F64x2Mul();
@@ -489,6 +496,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Ne();
const Operator* F64x2Lt();
const Operator* F64x2Le();
+ const Operator* F64x2Qfma();
+ const Operator* F64x2Qfms();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -497,6 +506,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4UConvertI32x4();
const Operator* F32x4Abs();
const Operator* F32x4Neg();
+ const Operator* F32x4Sqrt();
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
@@ -510,6 +520,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Ne();
const Operator* F32x4Lt();
const Operator* F32x4Le();
+ const Operator* F32x4Qfma();
+ const Operator* F32x4Qfms();
const Operator* I64x2Splat();
const Operator* I64x2ExtractLane(int32_t);
@@ -632,6 +644,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Not();
const Operator* S128Select();
+ const Operator* S8x16Swizzle();
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
const Operator* S1x2AnyTrue();
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
new file mode 100644
index 0000000000..1e112e8e82
--- /dev/null
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -0,0 +1,551 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/memory-lowering.h"
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An allocation group represents a set of allocations that have been folded
+// together.
+class MemoryLowering::AllocationGroup final : public ZoneObject {
+ public:
+ AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
+ AllocationGroup(Node* node, AllocationType allocation, Node* size,
+ Zone* zone);
+ ~AllocationGroup() = default;
+
+ void Add(Node* object);
+ bool Contains(Node* object) const;
+ bool IsYoungGenerationAllocation() const {
+ return allocation() == AllocationType::kYoung;
+ }
+
+ AllocationType allocation() const { return allocation_; }
+ Node* size() const { return size_; }
+
+ private:
+ ZoneSet<NodeId> node_ids_;
+ AllocationType const allocation_;
+ Node* const size_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+};
+
+MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
+ PoisoningMitigationLevel poisoning_level,
+ AllocationFolding allocation_folding,
+ WriteBarrierAssertFailedCallback callback,
+ const char* function_debug_name)
+ : jsgraph_(jsgraph),
+ zone_(zone),
+ graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ allocation_folding_(allocation_folding),
+ poisoning_level_(poisoning_level),
+ write_barrier_assert_failed_(callback),
+ function_debug_name_(function_debug_name) {}
+
+Reduction MemoryLowering::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ // Allocate nodes were purged from the graph in effect-control
+ // linearization.
+ UNREACHABLE();
+ case IrOpcode::kAllocateRaw:
+ return ReduceAllocateRaw(node);
+ case IrOpcode::kLoadFromObject:
+ return ReduceLoadFromObject(node);
+ case IrOpcode::kLoadElement:
+ return ReduceLoadElement(node);
+ case IrOpcode::kLoadField:
+ return ReduceLoadField(node);
+ case IrOpcode::kStoreToObject:
+ return ReduceStoreToObject(node);
+ case IrOpcode::kStoreElement:
+ return ReduceStoreElement(node);
+ case IrOpcode::kStoreField:
+ return ReduceStoreField(node);
+ case IrOpcode::kStore:
+ return ReduceStore(node);
+ default:
+ return NoChange();
+ }
+}
+
+#define __ gasm()->
+
+Reduction MemoryLowering::ReduceAllocateRaw(
+ Node* node, AllocationType allocation_type,
+ AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
+ DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
+ DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
+ state_ptr != nullptr);
+ Node* value;
+ Node* size = node->InputAt(0);
+ Node* effect = node->InputAt(1);
+ Node* control = node->InputAt(2);
+
+ gasm()->Reset(effect, control);
+
+ Node* allocate_builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
+ } else {
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
+ }
+
+ // Determine the top/limit addresses.
+ Node* top_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = __ ExternalConstant(
+ allocation_type == AllocationType::kYoung
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+ // Check if we can fold this allocation into a previous allocation represented
+ // by the incoming {state}.
+ IntPtrMatcher m(size);
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
+ allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
+ intptr_t const object_size = m.Value();
+ AllocationState const* state = *state_ptr;
+ if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
+ state->group()->allocation() == allocation_type) {
+ // We can fold this Allocate {node} into the allocation {group}
+ // represented by the given {state}. Compute the upper bound for
+ // the new {state}.
+ intptr_t const state_size = state->size() + object_size;
+
+ // Update the reservation check to the actual maximum upper bound.
+ AllocationGroup* const group = state->group();
+ if (machine()->Is64()) {
+ if (OpParameter<int64_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int64Constant(state_size));
+ }
+ } else {
+ if (OpParameter<int32_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(
+ group->size(),
+ common()->Int32Constant(static_cast<int32_t>(state_size)));
+ }
+ }
+
+ // Update the allocation top with the new object allocation.
+ // TODO(bmeurer): Defer writing back top as much as possible.
+ Node* top = __ IntAdd(state->top(), size);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
+
+ // Compute the effective inner allocated address.
+ value = __ BitcastWordToTagged(
+ __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ // Extend the allocation {group}.
+ group->Add(value);
+ *state_ptr =
+ AllocationState::Open(group, state_size, top, effect, zone());
+ } else {
+ auto call_runtime = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineType::PointerRepresentation());
+
+ // Setup a mutable reservation size node; will be patched as we fold
+ // additional allocations into this new group.
+ Node* size = __ UniqueIntPtrConstant(object_size);
+
+ // Load allocation top and limit.
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
+
+ // Check if we need to collect garbage before we can start bump pointer
+ // allocation (always done for folded allocations).
+ Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
+
+ __ GotoIfNot(check, &call_runtime);
+ __ Goto(&done, top);
+
+ __ Bind(&call_runtime);
+ {
+ if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
+ allocate_operator_.set(common()->Call(call_descriptor));
+ }
+ Node* vfalse = __ BitcastTaggedToWord(
+ __ Call(allocate_operator_.get(), allocate_builtin, size));
+ vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+ __ Goto(&done, vfalse);
+ }
+
+ __ Bind(&done);
+
+ // Compute the new top and write it back.
+ top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
+
+ // Compute the initial object address.
+ value = __ BitcastWordToTagged(
+ __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ // Start a new allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, allocation_type, size, zone());
+ *state_ptr =
+ AllocationState::Open(group, object_size, top, effect, zone());
+ }
+ } else {
+ auto call_runtime = __ MakeDeferredLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+
+ // Load allocation top and limit.
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
+
+ // Compute the new top.
+ Node* new_top = __ IntAdd(top, size);
+
+ // Check if we can do bump pointer allocation here.
+ Node* check = __ UintLessThan(new_top, limit);
+ __ GotoIfNot(check, &call_runtime);
+ if (allow_large_objects == AllowLargeObjects::kTrue) {
+ __ GotoIfNot(
+ __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
+ &call_runtime);
+ }
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), new_top);
+ __ Goto(&done, __ BitcastWordToTagged(
+ __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
+
+ __ Bind(&call_runtime);
+ if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kCanUseRoots, Operator::kNoThrow);
+ allocate_operator_.set(common()->Call(call_descriptor));
+ }
+ __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
+
+ __ Bind(&done);
+ value = done.PhiAt(0);
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+
+ if (state_ptr) {
+ // Create an unfoldable allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, allocation_type, zone());
+ *state_ptr = AllocationState::Closed(group, effect, zone());
+ }
+ }
+
+ // Replace all effect uses of {node} with the {effect} and replace
+ // all value uses of {node} with the {value}.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsValueEdge(edge)) {
+ edge.UpdateTo(value);
+ } else {
+ DCHECK(NodeProperties::IsControlEdge(edge));
+ edge.UpdateTo(control);
+ }
+ }
+
+ // Kill the {node} to make sure we don't leave dangling dead uses.
+ node->Kill();
+
+ return Replace(value);
+}
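// A minimal standalone sketch (not part of this patch) of the bump-pointer
// fast path that ReduceAllocateRaw assembles as graph nodes above: advance the
// allocation top if the result stays below the limit, otherwise fall back to
// the runtime. AllocateSlow is a hypothetical stand-in for the Allocate
// builtin call; the real code also distinguishes young/old generation and
// large-object allocations.
#include <cstddef>
#include <cstdint>

// Hypothetical slow path standing in for the Allocate builtin.
uintptr_t AllocateSlow(size_t /*size*/) { return 0; }

uintptr_t BumpPointerAllocate(uintptr_t* top_address, uintptr_t* limit_address,
                              size_t size) {
  uintptr_t top = *top_address;
  uintptr_t new_top = top + size;
  if (new_top < *limit_address) {  // UintLessThan(new_top, limit) above
    *top_address = new_top;        // write back the new allocation top
    return top + 1;                // tag the result, mirroring kHeapObjectTag
  }
  return AllocateSlow(size);       // GC or large object: defer to the runtime
}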
+
+Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceLoadElement(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* index = node->InputAt(1);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ MachineType type = access.machine_type;
+ if (NeedsPoisoning(access.load_sensitivity)) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(type));
+ }
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceLoadField(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ MachineType type = access.machine_type;
+ if (NeedsPoisoning(access.load_sensitivity)) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(type));
+ }
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreToObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStoreField(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ return Changed(node);
+}
+
+Reduction MemoryLowering::ReduceStore(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStore, node->opcode());
+ StoreRepresentation representation = StoreRepresentationOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, representation.write_barrier_kind());
+ if (write_barrier_kind != representation.write_barrier_kind()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ representation.representation(), write_barrier_kind)));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
+ int const element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
+ }
+ int const fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
+ }
+ return index;
+}
+
+#undef __
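// A standalone sketch (not part of this patch) of the index computation in
// ComputeIndex above: scale the element index by the element size and add the
// header offset minus the heap-object tag. All parameters are illustrative;
// the real values come from the ElementAccess descriptor.
#include <cstdint>

intptr_t ComputeByteOffset(intptr_t index, int element_size_log2,
                           int header_size, int tag) {
  if (element_size_log2) index <<= element_size_log2;  // WordShl above
  int const fixed_offset = header_size - tag;          // untagged header size
  if (fixed_offset) index += fixed_offset;             // IntAdd above
  return index;  // byte offset relative to the tagged object pointer
}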
+
+namespace {
+
+bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
+ while (true) {
+ switch (value->opcode()) {
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ case IrOpcode::kChangeTaggedToCompressedSigned:
+ return false;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ case IrOpcode::kChangeTaggedToCompressed:
+ value = NodeProperties::GetValueInput(value, 0);
+ continue;
+ case IrOpcode::kHeapConstant: {
+ RootIndex root_index;
+ if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
+ &root_index) &&
+ RootsTable::IsImmortalImmovable(root_index)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+ }
+}
+
+} // namespace
+
+Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
+ DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
+ const AllocateParameters& allocation = AllocateParametersOf(node->op());
+ return ReduceAllocateRaw(node, allocation.allocation_type(),
+ allocation.allow_large_objects(), nullptr);
+}
+
+WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
+ Node* node, Node* object, Node* value, AllocationState const* state,
+ WriteBarrierKind write_barrier_kind) {
+ if (state && state->IsYoungGenerationAllocation() &&
+ state->group()->Contains(object)) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (!ValueNeedsWriteBarrier(value, isolate())) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
+ write_barrier_assert_failed_(node, object, function_debug_name_, zone());
+ }
+ return write_barrier_kind;
+}
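// A simplified standalone sketch (not part of this patch) of the decision
// above: the barrier is dropped when the store targets an object from the
// same young-generation allocation group, or when the stored value can never
// be a movable heap pointer; the kAssertNoWriteBarrier reporting path is
// omitted here.
enum class Barrier { kNone, kFull };

Barrier ComputeBarrier(bool object_in_young_group, bool value_needs_barrier,
                       Barrier requested) {
  if (object_in_young_group) return Barrier::kNone;  // store stays in new space
  if (!value_needs_barrier) return Barrier::kNone;   // e.g. smi or immortal root
  return requested;  // otherwise keep what the access descriptor asked for
}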
+
+bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
+ // Safe loads do not need poisoning.
+ if (load_sensitivity == LoadSensitivity::kSafe) return false;
+
+ switch (poisoning_level_) {
+ case PoisoningMitigationLevel::kDontPoison:
+ return false;
+ case PoisoningMitigationLevel::kPoisonAll:
+ return true;
+ case PoisoningMitigationLevel::kPoisonCriticalOnly:
+ return load_sensitivity == LoadSensitivity::kCritical;
+ }
+ UNREACHABLE();
+}
+
+MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
+ AllocationType allocation,
+ Zone* zone)
+ : node_ids_(zone), allocation_(allocation), size_(nullptr) {
+ node_ids_.insert(node->id());
+}
+
+MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
+ AllocationType allocation,
+ Node* size, Zone* zone)
+ : node_ids_(zone), allocation_(allocation), size_(size) {
+ node_ids_.insert(node->id());
+}
+
+void MemoryLowering::AllocationGroup::Add(Node* node) {
+ node_ids_.insert(node->id());
+}
+
+bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
+ // Additions should stay within the same allocated object, so it's safe to
+ // ignore them.
+ while (node_ids_.find(node->id()) == node_ids_.end()) {
+ switch (node->opcode()) {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt64Add:
+ node = NodeProperties::GetValueInput(node, 0);
+ break;
+ default:
+ return false;
+ }
+ }
+ return true;
+}
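// A standalone sketch (not part of this patch) of the walk above: follow the
// first value input through "inner pointer" operations (bitcasts and integer
// adds in the real code) until a node registered in the group is found or an
// unrelated producer stops the search. FakeNode is a hypothetical stand-in
// for compiler::Node.
#include <set>

struct FakeNode {
  int id;
  bool is_inner_pointer_op;  // bitcast or integer add in the real code
  const FakeNode* input;     // first value input
};

bool GroupContains(const std::set<int>& group_ids, const FakeNode* node) {
  while (group_ids.count(node->id) == 0) {
    if (!node->is_inner_pointer_op || node->input == nullptr) return false;
    node = node->input;  // additions stay within the same allocated object
  }
  return true;
}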
+
+MemoryLowering::AllocationState::AllocationState()
+ : group_(nullptr),
+ size_(std::numeric_limits<int>::max()),
+ top_(nullptr),
+ effect_(nullptr) {}
+
+MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
+ Node* effect)
+ : group_(group),
+ size_(std::numeric_limits<int>::max()),
+ top_(nullptr),
+ effect_(effect) {}
+
+MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
+ intptr_t size, Node* top,
+ Node* effect)
+ : group_(group), size_(size), top_(top), effect_(effect) {}
+
+bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
+ return group() && group()->IsYoungGenerationAllocation();
+}
+
+Graph* MemoryLowering::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryLowering::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryLowering::common() const {
+ return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
new file mode 100644
index 0000000000..a1f1fc1861
--- /dev/null
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -0,0 +1,136 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_LOWERING_H_
+#define V8_COMPILER_MEMORY_LOWERING_H_
+
+#include "src/compiler/graph-assembler.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// Provides operations to lower all simplified memory access and allocation
+// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
+// operators.
+class MemoryLowering final : public Reducer {
+ public:
+ enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
+ class AllocationGroup;
+
+ // An allocation state is propagated on the effect paths through the graph.
+ class AllocationState final : public ZoneObject {
+ public:
+ static AllocationState const* Empty(Zone* zone) {
+ return new (zone) AllocationState();
+ }
+ static AllocationState const* Closed(AllocationGroup* group, Node* effect,
+ Zone* zone) {
+ return new (zone) AllocationState(group, effect);
+ }
+ static AllocationState const* Open(AllocationGroup* group, intptr_t size,
+ Node* top, Node* effect, Zone* zone) {
+ return new (zone) AllocationState(group, size, top, effect);
+ }
+
+ bool IsYoungGenerationAllocation() const;
+
+ AllocationGroup* group() const { return group_; }
+ Node* top() const { return top_; }
+ Node* effect() const { return effect_; }
+ intptr_t size() const { return size_; }
+
+ private:
+ AllocationState();
+ explicit AllocationState(AllocationGroup* group, Node* effect);
+ AllocationState(AllocationGroup* group, intptr_t size, Node* top,
+ Node* effect);
+
+ AllocationGroup* const group_;
+ // The upper bound of the combined allocated object size on the current path
+ // (max int if allocation folding is impossible on this path).
+ intptr_t const size_;
+ Node* const top_;
+ Node* const effect_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationState);
+ };
+
+ using WriteBarrierAssertFailedCallback = std::function<void(
+ Node* node, Node* object, const char* name, Zone* temp_zone)>;
+
+ MemoryLowering(
+ JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ AllocationFolding allocation_folding =
+ AllocationFolding::kDontAllocationFolding,
+ WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
+ Zone*) { UNREACHABLE(); },
+ const char* function_debug_name = nullptr);
+ ~MemoryLowering() = default;
+
+ const char* reducer_name() const override { return "MemoryReducer"; }
+
+ // Perform memory lowering reduction on the given Node.
+ Reduction Reduce(Node* node) override;
+
+ // Specific reducers for each optype to enable keeping track of
+ // AllocationState by the MemoryOptimizer.
+ Reduction ReduceAllocateRaw(Node* node, AllocationType allocation_type,
+ AllowLargeObjects allow_large_objects,
+ AllocationState const** state);
+ Reduction ReduceLoadFromObject(Node* node);
+ Reduction ReduceLoadElement(Node* node);
+ Reduction ReduceLoadField(Node* node);
+ Reduction ReduceStoreToObject(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStoreElement(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStoreField(Node* node,
+ AllocationState const* state = nullptr);
+ Reduction ReduceStore(Node* node, AllocationState const* state = nullptr);
+
+ private:
+ Reduction ReduceAllocateRaw(Node* node);
+ WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
+ Node* value,
+ AllocationState const* state,
+ WriteBarrierKind);
+ Node* ComputeIndex(ElementAccess const& access, Node* node);
+ bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
+
+ Graph* graph() const;
+ Isolate* isolate() const;
+ Zone* zone() const { return zone_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ GraphAssembler* gasm() { return &graph_assembler_; }
+
+ SetOncePointer<const Operator> allocate_operator_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+ GraphAssembler graph_assembler_;
+ AllocationFolding allocation_folding_;
+ PoisoningMitigationLevel poisoning_level_;
+ WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
+ const char* function_debug_name_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryLowering);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MEMORY_LOWERING_H_
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 8684f2ce3c..6527dfb287 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -11,90 +11,12 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
-#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding,
- const char* function_debug_name,
- TickCounter* tick_counter)
- : jsgraph_(jsgraph),
- empty_state_(AllocationState::Empty(zone)),
- pending_(zone),
- tokens_(zone),
- zone_(zone),
- graph_assembler_(jsgraph, nullptr, nullptr, zone),
- poisoning_level_(poisoning_level),
- allocation_folding_(allocation_folding),
- function_debug_name_(function_debug_name),
- tick_counter_(tick_counter) {}
-
-void MemoryOptimizer::Optimize() {
- EnqueueUses(graph()->start(), empty_state());
- while (!tokens_.empty()) {
- Token const token = tokens_.front();
- tokens_.pop();
- VisitNode(token.node, token.state);
- }
- DCHECK(pending_.empty());
- DCHECK(tokens_.empty());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
- AllocationType allocation,
- Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(nullptr) {
- node_ids_.insert(node->id());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
- AllocationType allocation,
- Node* size, Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(size) {
- node_ids_.insert(node->id());
-}
-
-void MemoryOptimizer::AllocationGroup::Add(Node* node) {
- node_ids_.insert(node->id());
-}
-
-bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
- // Additions should stay within the same allocated object, so it's safe to
- // ignore them.
- while (node_ids_.find(node->id()) == node_ids_.end()) {
- switch (node->opcode()) {
- case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt64Add:
- node = NodeProperties::GetValueInput(node, 0);
- break;
- default:
- return false;
- }
- }
- return true;
-}
-
-MemoryOptimizer::AllocationState::AllocationState()
- : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
- : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
- intptr_t size, Node* top)
- : group_(group), size_(size), top_(top) {}
-
-bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
- return group() && group()->IsYoungGenerationAllocation();
-}
-
namespace {
bool CanAllocate(const Node* node) {
@@ -221,8 +143,67 @@ Node* EffectPhiForPhi(Node* phi) {
return nullptr;
}
+void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
+ Zone* temp_zone) {
+ std::stringstream str;
+ str << "MemoryOptimizer could not remove write barrier for node #"
+ << node->id() << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << node->id() << " to break in CSA code.\n";
+ Node* object_position = object;
+ if (object_position->opcode() == IrOpcode::kPhi) {
+ object_position = EffectPhiForPhi(object_position);
+ }
+ Node* allocating_node = nullptr;
+ if (object_position && object_position->op()->EffectOutputCount() > 0) {
+ allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
+ }
+ if (allocating_node) {
+ str << "\n There is a potentially allocating node in between:\n";
+ str << " " << *allocating_node << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << allocating_node->id() << " to break there.\n";
+ if (allocating_node->opcode() == IrOpcode::kCall) {
+ str << " If this is a never-allocating runtime call, you can add an "
+ "exception to Runtime::MayAllocate.\n";
+ }
+ } else {
+ str << "\n It seems the store happened to something different than a "
+ "direct "
+ "allocation:\n";
+ str << " " << *object << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << object->id() << " to break there.\n";
+ }
+ FATAL("%s", str.str().c_str());
+}
+
} // namespace
+MemoryOptimizer::MemoryOptimizer(
+ JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ MemoryLowering::AllocationFolding allocation_folding,
+ const char* function_debug_name, TickCounter* tick_counter)
+ : memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
+ jsgraph_(jsgraph),
+ empty_state_(AllocationState::Empty(zone)),
+ pending_(zone),
+ tokens_(zone),
+ zone_(zone),
+ tick_counter_(tick_counter) {}
+
+void MemoryOptimizer::Optimize() {
+ EnqueueUses(graph()->start(), empty_state());
+ while (!tokens_.empty()) {
+ Token const token = tokens_.front();
+ tokens_.pop();
+ VisitNode(token.node, token.state);
+ }
+ DCHECK(pending_.empty());
+ DCHECK(tokens_.empty());
+}
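// A standalone sketch (not part of this patch) of the worklist that
// Optimize() drains above: each token pairs a node with the allocation state
// reaching it along the effect chain, and nodes are visited in FIFO order.
#include <functional>
#include <queue>

struct Token { int node_id; int state_id; };

void Drain(std::queue<Token>* tokens,
           const std::function<void(const Token&)>& visit) {
  while (!tokens->empty()) {
    Token const token = tokens->front();
    tokens->pop();
    visit(token);  // VisitNode(token.node, token.state) in the real code
  }
}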
+
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
tick_counter_->DoTick();
DCHECK(!node->IsDead());
@@ -259,8 +240,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
-#define __ gasm()->
-
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
@@ -293,13 +272,6 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
- Node* value;
- Node* size = node->InputAt(0);
- Node* effect = node->InputAt(1);
- Node* control = node->InputAt(2);
-
- gasm()->Reset(effect, control);
-
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
@@ -310,7 +282,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
-
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
@@ -339,299 +310,62 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
- Node* allocate_builtin;
- if (allocation_type == AllocationType::kYoung) {
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInYoungGenerationStubConstant();
- } else {
- allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
- }
- } else {
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- allocate_builtin = __ AllocateInOldGenerationStubConstant();
- } else {
- allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
- }
- }
-
- // Determine the top/limit addresses.
- Node* top_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_top_address(isolate())
- : ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = __ ExternalConstant(
- allocation_type == AllocationType::kYoung
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
-
- // Check if we can fold this allocation into a previous allocation represented
- // by the incoming {state}.
- IntPtrMatcher m(size);
- if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
- intptr_t const object_size = m.Value();
- if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
- state->size() <= kMaxRegularHeapObjectSize - object_size &&
- state->group()->allocation() == allocation_type) {
- // We can fold this Allocate {node} into the allocation {group}
- // represented by the given {state}. Compute the upper bound for
- // the new {state}.
- intptr_t const state_size = state->size() + object_size;
-
- // Update the reservation check to the actual maximum upper bound.
- AllocationGroup* const group = state->group();
- if (machine()->Is64()) {
- if (OpParameter<int64_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(group->size(),
- common()->Int64Constant(state_size));
- }
- } else {
- if (OpParameter<int32_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(
- group->size(),
- common()->Int32Constant(static_cast<int32_t>(state_size)));
- }
- }
-
- // Update the allocation top with the new object allocation.
- // TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = __ IntAdd(state->top(), size);
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), top);
-
- // Compute the effective inner allocated address.
- value = __ BitcastWordToTagged(
- __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
-
- // Extend the allocation {group}.
- group->Add(value);
- state = AllocationState::Open(group, state_size, top, zone());
- } else {
- auto call_runtime = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineType::PointerRepresentation());
-
- // Setup a mutable reservation size node; will be patched as we fold
- // additional allocations into this new group.
- Node* size = __ UniqueIntPtrConstant(object_size);
-
- // Load allocation top and limit.
- Node* top =
- __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
- Node* limit =
- __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
- // Check if we need to collect garbage before we can start bump pointer
- // allocation (always done for folded allocations).
- Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
-
- __ GotoIfNot(check, &call_runtime);
- __ Goto(&done, top);
-
- __ Bind(&call_runtime);
- {
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
- Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), allocate_builtin, size));
- vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
- __ Goto(&done, vfalse);
- }
-
- __ Bind(&done);
-
- // Compute the new top and write it back.
- top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), top);
-
- // Compute the initial object address.
- value = __ BitcastWordToTagged(
- __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
-
- // Start a new allocation group.
- AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation_type, size, zone());
- state = AllocationState::Open(group, object_size, top, zone());
- }
- } else {
- auto call_runtime = __ MakeDeferredLabel();
- auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
-
- // Load allocation top and limit.
- Node* top =
- __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
- Node* limit =
- __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
- // Compute the new top.
- Node* new_top = __ IntAdd(top, size);
-
- // Check if we can do bump pointer allocation here.
- Node* check = __ UintLessThan(new_top, limit);
- __ GotoIfNot(check, &call_runtime);
- if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
- __ GotoIfNot(
- __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
- &call_runtime);
- }
- __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
- kNoWriteBarrier),
- top_address, __ IntPtrConstant(0), new_top);
- __ Goto(&done, __ BitcastWordToTagged(
- __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
-
- __ Bind(&call_runtime);
- if (!allocate_operator_.is_set()) {
- auto descriptor = AllocateDescriptor{};
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kCanUseRoots, Operator::kNoThrow);
- allocate_operator_.set(common()->Call(call_descriptor));
- }
- __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
-
- __ Bind(&done);
- value = done.PhiAt(0);
-
- // Create an unfoldable allocation group.
- AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation_type, zone());
- state = AllocationState::Closed(group, zone());
- }
-
- effect = __ ExtractCurrentEffect();
- control = __ ExtractCurrentControl();
-
- // Replace all effect uses of {node} with the {effect}, enqueue the
- // effect uses for further processing, and replace all value uses of
- // {node} with the {value}.
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- EnqueueUse(edge.from(), edge.index(), state);
- edge.UpdateTo(effect);
- } else if (NodeProperties::IsValueEdge(edge)) {
- edge.UpdateTo(value);
- } else {
- DCHECK(NodeProperties::IsControlEdge(edge));
- edge.UpdateTo(control);
- }
- }
-
- // Kill the {node} to make sure we don't leave dangling dead uses.
- node->Kill();
+ memory_lowering()->ReduceAllocateRaw(
+ node, allocation_type, allocation.allow_large_objects(), &state);
+ EnqueueUses(state->effect(), state);
}
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
- ObjectAccess const& access = ObjectAccessOf(node->op());
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
- ObjectAccess const& access = ObjectAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
- EnqueueUses(node, state);
-}
-
-#undef __
-
-void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
- DCHECK_EQ(IrOpcode::kCall, node->opcode());
- // If the call can allocate, we start with a fresh state.
- if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
- state = empty_state();
- }
+ memory_lowering()->ReduceStoreToObject(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
- ElementAccess const& access = ElementAccessOf(node->op());
- Node* index = node->InputAt(1);
- node->ReplaceInput(1, ComputeIndex(access, index));
- MachineType type = access.machine_type;
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ memory_lowering()->ReduceLoadElement(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
- FieldAccess const& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- MachineType type = access.machine_type;
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ memory_lowering()->ReduceLoadField(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
- ElementAccess const& access = ElementAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- node->ReplaceInput(1, ComputeIndex(access, index));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStoreElement(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
- FieldAccess const& access = FieldAccessOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(1);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, access.write_barrier_kind);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStoreField(node, state);
EnqueueUses(node, state);
}
-
void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
- StoreRepresentation representation = StoreRepresentationOf(node->op());
- Node* object = node->InputAt(0);
- Node* value = node->InputAt(2);
- WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- node, object, value, state, representation.write_barrier_kind());
- if (write_barrier_kind != representation.write_barrier_kind()) {
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- representation.representation(), write_barrier_kind)));
+ memory_lowering()->ReduceStore(node, state);
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ // If the call can allocate, we start with a fresh state.
+ if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+ state = empty_state();
}
EnqueueUses(node, state);
}
@@ -641,109 +375,12 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
-Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
- int const element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- index = graph()->NewNode(machine()->WordShl(), index,
- jsgraph()->IntPtrConstant(element_size_shift));
- }
- int const fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->IntAdd(), index,
- jsgraph()->IntPtrConstant(fixed_offset));
- }
- return index;
-}
-
-namespace {
-
-bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
- while (true) {
- switch (value->opcode()) {
- case IrOpcode::kBitcastWordToTaggedSigned:
- case IrOpcode::kChangeTaggedSignedToCompressedSigned:
- case IrOpcode::kChangeTaggedToCompressedSigned:
- return false;
- case IrOpcode::kChangeTaggedPointerToCompressedPointer:
- case IrOpcode::kChangeTaggedToCompressed:
- value = NodeProperties::GetValueInput(value, 0);
- continue;
- case IrOpcode::kHeapConstant: {
- RootIndex root_index;
- if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
- &root_index) &&
- RootsTable::IsImmortalImmovable(root_index)) {
- return false;
- }
- break;
- }
- default:
- break;
- }
- return true;
- }
-}
-
-void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
- Zone* temp_zone) {
- std::stringstream str;
- str << "MemoryOptimizer could not remove write barrier for node #"
- << node->id() << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << node->id() << " to break in CSA code.\n";
- Node* object_position = object;
- if (object_position->opcode() == IrOpcode::kPhi) {
- object_position = EffectPhiForPhi(object_position);
- }
- Node* allocating_node = nullptr;
- if (object_position && object_position->op()->EffectOutputCount() > 0) {
- allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
- }
- if (allocating_node) {
- str << "\n There is a potentially allocating node in between:\n";
- str << " " << *allocating_node << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << allocating_node->id() << " to break there.\n";
- if (allocating_node->opcode() == IrOpcode::kCall) {
- str << " If this is a never-allocating runtime call, you can add an "
- "exception to Runtime::MayAllocate.\n";
- }
- } else {
- str << "\n It seems the store happened to something different than a "
- "direct "
- "allocation:\n";
- str << " " << *object << "\n";
- str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
- << object->id() << " to break there.\n";
- }
- FATAL("%s", str.str().c_str());
-}
-
-} // namespace
-
-WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
- Node* node, Node* object, Node* value, AllocationState const* state,
- WriteBarrierKind write_barrier_kind) {
- if (state->IsYoungGenerationAllocation() &&
- state->group()->Contains(object)) {
- write_barrier_kind = kNoWriteBarrier;
- }
- if (!ValueNeedsWriteBarrier(value, isolate())) {
- write_barrier_kind = kNoWriteBarrier;
- }
- if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
- WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
- }
- return write_barrier_kind;
-}
-
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
AllocationStates const& states) {
// Check if all states are the same; or at least if all allocation
// states belong to the same allocation group.
AllocationState const* state = states.front();
- AllocationGroup* group = state->group();
+ MemoryLowering::AllocationGroup* group = state->group();
for (size_t i = 1; i < states.size(); ++i) {
if (states[i] != state) state = nullptr;
if (states[i]->group() != group) group = nullptr;
@@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
// TODO(bmeurer): We could potentially just create a Phi here to merge
// the various tops; but we need to pay special attention not to create
// an unschedulable graph.
- state = AllocationState::Closed(group, zone());
+ state = AllocationState::Closed(group, nullptr, zone());
} else {
// The states are from different allocation groups.
state = empty_state();
@@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index,
Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
-Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
-
-CommonOperatorBuilder* MemoryOptimizer::common() const {
- return jsgraph()->common();
-}
-
-MachineOperatorBuilder* MemoryOptimizer::machine() const {
- return jsgraph()->machine();
-}
-
-bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
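// A standalone sketch (not part of this patch) of the MergeStates rule above:
// identical incoming states pass through unchanged, states that only share an
// allocation group collapse to a closed state for that group, and everything
// else falls back to the empty state.
enum class MergeResult { kSameState, kClosedGroupState, kEmptyState };

MergeResult MergeRule(bool all_states_equal, bool all_groups_equal) {
  if (all_states_equal) return MergeResult::kSameState;
  if (all_groups_equal) return MergeResult::kClosedGroupState;
  return MergeResult::kEmptyState;
}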
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index a663bf07ed..0e0fc5684c 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
-#include "src/compiler/graph-assembler.h"
+#include "src/compiler/memory-lowering.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -15,95 +15,29 @@ class TickCounter;
namespace compiler {
-// Forward declarations.
-class CommonOperatorBuilder;
-struct ElementAccess;
-class Graph;
class JSGraph;
-class MachineOperatorBuilder;
-class Node;
-class Operator;
+class Graph;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
-// Lowers all simplified memory access and allocation related nodes (i.e.
-// Allocate, LoadField, StoreField and friends) to machine operators.
// Performs allocation folding and store write barrier elimination
-// implicitly.
+// implicitly, while lowering all simplified memory access and allocation
+// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
+// operators.
class MemoryOptimizer final {
public:
- enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
-
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding,
+ MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
private:
- // An allocation group represents a set of allocations that have been folded
- // together.
- class AllocationGroup final : public ZoneObject {
- public:
- AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
- AllocationGroup(Node* node, AllocationType allocation, Node* size,
- Zone* zone);
- ~AllocationGroup() = default;
-
- void Add(Node* object);
- bool Contains(Node* object) const;
- bool IsYoungGenerationAllocation() const {
- return allocation() == AllocationType::kYoung;
- }
-
- AllocationType allocation() const { return allocation_; }
- Node* size() const { return size_; }
-
- private:
- ZoneSet<NodeId> node_ids_;
- AllocationType const allocation_;
- Node* const size_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
- };
-
- // An allocation state is propagated on the effect paths through the graph.
- class AllocationState final : public ZoneObject {
- public:
- static AllocationState const* Empty(Zone* zone) {
- return new (zone) AllocationState();
- }
- static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
- return new (zone) AllocationState(group);
- }
- static AllocationState const* Open(AllocationGroup* group, intptr_t size,
- Node* top, Zone* zone) {
- return new (zone) AllocationState(group, size, top);
- }
-
- bool IsYoungGenerationAllocation() const;
-
- AllocationGroup* group() const { return group_; }
- Node* top() const { return top_; }
- intptr_t size() const { return size_; }
-
- private:
- AllocationState();
- explicit AllocationState(AllocationGroup* group);
- AllocationState(AllocationGroup* group, intptr_t size, Node* top);
-
- AllocationGroup* const group_;
- // The upper bound of the combined allocated object size on the current path
- // (max int if allocation folding is impossible on this path).
- intptr_t const size_;
- Node* const top_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationState);
- };
+ using AllocationState = MemoryLowering::AllocationState;
// An array of allocation states used to collect states on merges.
using AllocationStates = ZoneVector<AllocationState const*>;
@@ -127,44 +61,29 @@ class MemoryOptimizer final {
void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
- Node* ComputeIndex(ElementAccess const&, Node*);
- WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
- Node* value,
- AllocationState const* state,
- WriteBarrierKind);
-
AllocationState const* MergeStates(AllocationStates const& states);
void EnqueueMerge(Node*, int, AllocationState const*);
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
-
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.
bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
AllocationState const* empty_state() const { return empty_state_; }
+ MemoryLowering* memory_lowering() { return &memory_lowering_; }
Graph* graph() const;
- Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
- GraphAssembler* gasm() { return &graph_assembler_; }
- SetOncePointer<const Operator> allocate_operator_;
- JSGraph* const jsgraph_;
+ MemoryLowering memory_lowering_;
+ JSGraph* jsgraph_;
AllocationState const* const empty_state_;
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
- GraphAssembler graph_assembler_;
- PoisoningMitigationLevel poisoning_level_;
- AllocationFolding allocation_folding_;
- const char* function_debug_name_;
TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 20698f4cd6..82bc179519 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -187,10 +187,11 @@ using Float64Matcher = FloatMatcher<double, IrOpcode::kFloat64Constant>;
using NumberMatcher = FloatMatcher<double, IrOpcode::kNumberConstant>;
// A pattern matcher for heap object constants.
-struct HeapObjectMatcher final
- : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
- explicit HeapObjectMatcher(Node* node)
- : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+template <IrOpcode::Value kHeapConstantOpcode>
+struct HeapObjectMatcherImpl final
+ : public ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode> {
+ explicit HeapObjectMatcherImpl(Node* node)
+ : ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {}
bool Is(Handle<HeapObject> const& value) const {
return this->HasValue() && this->Value().address() == value.address();
@@ -201,6 +202,9 @@ struct HeapObjectMatcher final
}
};
+using HeapObjectMatcher = HeapObjectMatcherImpl<IrOpcode::kHeapConstant>;
+using CompressedHeapObjectMatcher =
+ HeapObjectMatcherImpl<IrOpcode::kCompressedHeapConstant>;
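// A standalone sketch (not part of this patch) of the refactoring pattern
// above: parameterizing the matcher over the opcode lets the tagged and
// compressed heap-constant matchers share one implementation. The enum and
// names below are illustrative only.
enum class Op { kHeapConstant, kCompressedHeapConstant };

template <Op kExpected>
struct ConstantMatcher {
  explicit ConstantMatcher(Op actual) : matches(actual == kExpected) {}
  bool matches;  // true when the node's opcode is the expected constant kind
};

using TaggedMatcher = ConstantMatcher<Op::kHeapConstant>;
using CompressedMatcher = ConstantMatcher<Op::kCompressedHeapConstant>;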
// A pattern matcher for external reference constants.
struct ExternalReferenceMatcher final
@@ -295,6 +299,8 @@ using Float64BinopMatcher = BinopMatcher<Float64Matcher, Float64Matcher>;
using NumberBinopMatcher = BinopMatcher<NumberMatcher, NumberMatcher>;
using HeapObjectBinopMatcher =
BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>;
+using CompressedHeapObjectBinopMatcher =
+ BinopMatcher<CompressedHeapObjectMatcher, CompressedHeapObjectMatcher>;
template <class BinopMatcher, IrOpcode::Value kMulOpcode,
IrOpcode::Value kShiftOpcode>
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 76ea4bb1a9..b4ff5f7185 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE Node final {
Uses uses() { return Uses(this); }
- // Returns true if {owner} is the user of {this} node.
+ // Returns true if {owner} is the only user of {this} node.
bool OwnedBy(Node* owner) const {
return first_use_ && first_use_->from() == owner && !first_use_->next;
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index fe45d9276a..76c6bfec2f 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -156,7 +156,8 @@
V(JSCreateObject) \
V(JSCreatePromise) \
V(JSCreateStringIterator) \
- V(JSCreateTypedArray)
+ V(JSCreateTypedArray) \
+ V(JSGetTemplateObject)
#define JS_OBJECT_OP_LIST(V) \
JS_CREATE_OP_LIST(V) \
@@ -425,11 +426,14 @@
V(LoadFieldByIndex) \
V(LoadField) \
V(LoadElement) \
+ V(LoadMessage) \
V(LoadTypedElement) \
V(LoadFromObject) \
V(LoadDataViewElement) \
+ V(LoadStackArgument) \
V(StoreField) \
V(StoreElement) \
+ V(StoreMessage) \
V(StoreTypedElement) \
V(StoreToObject) \
V(StoreDataViewElement) \
@@ -669,9 +673,10 @@
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
+ V(Simd128ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
- V(BitcastTaggedSignedToWord) \
+ V(BitcastTaggedToWordForTagAndSmiBits) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(BitcastWord32ToCompressedSigned) \
@@ -749,6 +754,7 @@
V(F64x2ReplaceLane) \
V(F64x2Abs) \
V(F64x2Neg) \
+ V(F64x2Sqrt) \
V(F64x2Add) \
V(F64x2Sub) \
V(F64x2Mul) \
@@ -759,6 +765,8 @@
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
+ V(F64x2Qfma) \
+ V(F64x2Qfms) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -766,6 +774,7 @@
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
@@ -781,6 +790,8 @@
V(F32x4Le) \
V(F32x4Gt) \
V(F32x4Ge) \
+ V(F32x4Qfma) \
+ V(F32x4Qfms) \
V(I64x2Splat) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
@@ -905,6 +916,7 @@
V(S128Or) \
V(S128Xor) \
V(S128Select) \
+ V(S8x16Swizzle) \
V(S8x16Shuffle) \
V(S1x2AnyTrue) \
V(S1x2AllTrue) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 1fcc12291d..731a6c8496 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -41,6 +41,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSCreateEmptyLiteralObject:
case IrOpcode::kJSCreateArrayFromIterable:
case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSGetTemplateObject:
case IrOpcode::kJSForInEnumerate:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 8b2f424789..b9648d9195 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -9,7 +9,6 @@
#include <memory>
#include <sstream>
-#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/codegen/assembler-inl.h"
@@ -97,6 +96,35 @@ namespace v8 {
namespace internal {
namespace compiler {
+static constexpr char kCodegenZoneName[] = "codegen-zone";
+static constexpr char kGraphZoneName[] = "graph-zone";
+static constexpr char kInstructionZoneName[] = "instruction-zone";
+static constexpr char kMachineGraphVerifierZoneName[] =
+ "machine-graph-verifier-zone";
+static constexpr char kPipelineCompilationJobZoneName[] =
+ "pipeline-compilation-job-zone";
+static constexpr char kRegisterAllocationZoneName[] =
+ "register-allocation-zone";
+static constexpr char kRegisterAllocatorVerifierZoneName[] =
+ "register-allocator-verifier-zone";
+namespace {
+
+Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
+ Context current = closure->context();
+ size_t distance = 0;
+ while (!current.IsNativeContext()) {
+ if (current.IsModuleContext()) {
+ return Just(
+ OuterContext(handle(current, current.GetIsolate()), distance));
+ }
+ current = current.previous();
+ distance++;
+ }
+ return Nothing<OuterContext>();
+}
+
+} // anonymous namespace
+
class PipelineData {
public:
// For main entry point.
@@ -113,15 +141,16 @@ class PipelineData {
roots_relative_addressing_enabled_(
!isolate->serializer_enabled() &&
!isolate->IsGeneratingEmbeddedBuiltins()),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
broker_(new JSHeapBroker(isolate_, info_->zone(),
info_->trace_heap_broker_enabled())),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
@@ -158,7 +187,7 @@ class PipelineData {
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
graph_(mcgraph->graph()),
source_positions_(source_positions),
@@ -166,11 +195,12 @@ class PipelineData {
machine_(mcgraph->machine()),
common_(mcgraph->common()),
mcgraph_(mcgraph),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(assembler_options) {}
@@ -185,17 +215,18 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
graph_zone_(graph_zone_scope_.zone()),
graph_(graph),
source_positions_(source_positions),
node_origins_(node_origins),
schedule_(schedule),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(instruction_zone_scope_.zone()),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
jump_optimization_info_(jump_opt),
assembler_options_(assembler_options) {
@@ -218,13 +249,14 @@ class PipelineData {
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
- graph_zone_scope_(zone_stats_, ZONE_NAME),
- instruction_zone_scope_(zone_stats_, ZONE_NAME),
+ graph_zone_scope_(zone_stats_, kGraphZoneName),
+ instruction_zone_scope_(zone_stats_, kInstructionZoneName),
instruction_zone_(sequence->zone()),
sequence_(sequence),
- codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+ register_allocation_zone_scope_(zone_stats_,
+ kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {}
@@ -323,6 +355,20 @@ class PipelineData {
return assembler_options_;
}
+ void ChooseSpecializationContext() {
+ if (info()->is_function_context_specializing()) {
+ DCHECK(info()->has_context());
+ specialization_context_ =
+ Just(OuterContext(handle(info()->context(), isolate()), 0));
+ } else {
+ specialization_context_ = GetModuleContext(info()->closure());
+ }
+ }
+
+ Maybe<OuterContext> specialization_context() const {
+ return specialization_context_;
+ }
+
size_t* address_of_max_unoptimized_frame_height() {
return &max_unoptimized_frame_height_;
}
@@ -531,6 +577,7 @@ class PipelineData {
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
AssemblerOptions assembler_options_;
+ Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();
// The maximal combined height of all inlined frames in their unoptimized
// state. Calculated during instruction selection, applied during code
@@ -548,12 +595,19 @@ class PipelineImpl final {
template <typename Phase, typename... Args>
void Run(Args&&... args);
- // Step A. Run the graph creation and initial optimization passes.
+ // Step A.1. Serialize the data needed for the compilation front-end.
+ void Serialize();
+
+ // Step A.2. Run the graph creation and initial optimization passes.
bool CreateGraph();
- // B. Run the concurrent optimization passes.
+ // Step B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
+ // Alternative step B. Run minimal concurrent optimization passes for
+ // mid-tier.
+ bool OptimizeGraphForMidTier(Linkage* linkage);
+
// Substep B.1. Produce a scheduled graph.
void ComputeScheduledGraph();
@@ -642,8 +696,6 @@ void PrintInlinedFunctionInfo(
// compilation. For inlined functions print source position of their inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
Isolate* isolate) {
- AllowDeferredHandleDereference allow_deference_for_print_code;
-
SourceIdAssigner id_assigner(info->inlined_functions().size());
PrintFunctionSource(info, isolate, -1, info->shared_info());
const auto& inlined = info->inlined_functions();
@@ -662,7 +714,6 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
}
#ifdef ENABLE_DISASSEMBLER
- AllowDeferredHandleDereference allow_deference_for_print_code;
bool print_code =
FLAG_print_code ||
(info->IsOptimizing() && FLAG_print_opt_code &&
@@ -703,7 +754,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
Handle<SharedFunctionInfo> shared = info->shared_info();
os << "source_position = " << shared->StartPosition() << "\n";
}
- code->Disassemble(debug_name.get(), os);
+ code->Disassemble(debug_name.get(), os, isolate);
os << "--- End code ---\n";
}
#endif // ENABLE_DISASSEMBLER
@@ -800,8 +851,10 @@ class PipelineRunScope {
public:
PipelineRunScope(PipelineData* data, const char* phase_name)
: phase_scope_(data->pipeline_statistics(), phase_name),
- zone_scope_(data->zone_stats(), ZONE_NAME),
- origin_scope_(data->node_origins(), phase_name) {}
+ zone_scope_(data->zone_stats(), phase_name),
+ origin_scope_(data->node_origins(), phase_name) {
+ DCHECK_NOT_NULL(phase_name);
+ }
Zone* zone() { return zone_scope_.zone(); }
@@ -886,7 +939,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineCompilationJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function);
- ~PipelineCompilationJob();
+ ~PipelineCompilationJob() final;
protected:
Status PrepareJobImpl(Isolate* isolate) final;
@@ -915,7 +968,8 @@ PipelineCompilationJob::PipelineCompilationJob(
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
: OptimizedCompilationJob(&compilation_info_, "TurboFan"),
- zone_(function->GetIsolate()->allocator(), ZONE_NAME),
+ zone_(function->GetIsolate()->allocator(),
+ kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(&zone_, function->GetIsolate(), shared_info, function),
pipeline_statistics_(CreatePipelineStatistics(
@@ -976,9 +1030,16 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsAllocationFoldingEnabled();
}
+ // Determine whether to specialize the code for the function's context.
+ // We can't do this in the case of OSR, because we want to cache the
+ // generated code on the native context keyed on SharedFunctionInfo.
+ // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
+ // allow context specialization for OSR code.
if (compilation_info()->closure()->raw_feedback_cell().map() ==
- ReadOnlyRoots(isolate).one_closure_cell_map()) {
+ ReadOnlyRoots(isolate).one_closure_cell_map() &&
+ !compilation_info()->is_osr()) {
compilation_info()->MarkAsFunctionContextSpecializing();
+ data_.ChooseSpecializationContext();
}
if (compilation_info()->is_source_positions_enabled()) {
@@ -999,9 +1060,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
// assembly.
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
- if (!pipeline_.CreateGraph()) {
- CHECK(!isolate->has_pending_exception());
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ pipeline_.Serialize();
+
+ if (!FLAG_concurrent_inlining) {
+ if (!pipeline_.CreateGraph()) {
+ CHECK(!isolate->has_pending_exception());
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ }
}
return SUCCEEDED;
@@ -1012,7 +1077,21 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "v8.optimizingCompile.execute",
this, TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "function",
compilation_info()->shared_info()->TraceIDRef());
- if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+
+ if (FLAG_concurrent_inlining) {
+ if (!pipeline_.CreateGraph()) {
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
+ }
+ }
+
+ bool success;
+ if (FLAG_turboprop) {
+ success = pipeline_.OptimizeGraphForMidTier(linkage_);
+ } else {
+ success = pipeline_.OptimizeGraph(linkage_);
+ }
+ if (!success) return FAILED;
+
pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
}
@@ -1091,8 +1170,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
pipeline_(&data_),
wasm_engine_(wasm_engine) {}
- ~WasmHeapStubCompilationJob() = default;
-
protected:
Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
@@ -1119,7 +1196,7 @@ Pipeline::NewWasmHeapStubCompilationJob(
CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
Code::Kind kind, std::unique_ptr<char[]> debug_name,
const AssemblerOptions& options, SourcePositionTable* source_positions) {
- return base::make_unique<WasmHeapStubCompilationJob>(
+ return std::make_unique<WasmHeapStubCompilationJob>(
isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
std::move(debug_name), options, source_positions);
}
@@ -1175,7 +1252,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
if (FLAG_print_opt_code) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
- code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ code->Disassemble(compilation_info()->GetDebugName().get(), os, isolate);
}
#endif
return SUCCEEDED;
@@ -1212,38 +1289,10 @@ struct GraphBuilderPhase {
}
};
-namespace {
-
-Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
- Context current = closure->context();
- size_t distance = 0;
- while (!current.IsNativeContext()) {
- if (current.IsModuleContext()) {
- return Just(
- OuterContext(handle(current, current.GetIsolate()), distance));
- }
- current = current.previous();
- distance++;
- }
- return Nothing<OuterContext>();
-}
-
-Maybe<OuterContext> ChooseSpecializationContext(
- Isolate* isolate, OptimizedCompilationInfo* info) {
- if (info->is_function_context_specializing()) {
- DCHECK(info->has_context());
- return Just(OuterContext(handle(info->context(), isolate), 0));
- }
- return GetModuleContext(info->closure());
-}
-
-} // anonymous namespace
-
struct InliningPhase {
static const char* phase_name() { return "V8.TFInlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
- Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
@@ -1260,7 +1309,7 @@ struct InliningPhase {
data->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(), data->broker(),
- ChooseSpecializationContext(isolate, data->info()),
+ data->specialization_context(),
data->info()->is_function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
@@ -1389,9 +1438,13 @@ struct SerializationPhase {
flags |=
SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
- temp_zone, data->info()->closure(),
- flags, data->info()->osr_offset());
+ RunSerializerForBackgroundCompilation(
+ data->zone_stats(), data->broker(), data->dependencies(),
+ data->info()->closure(), flags, data->info()->osr_offset());
+ if (data->specialization_context().IsJust()) {
+ ContextRef(data->broker(),
+ data->specialization_context().FromJust().context);
+ }
}
};
@@ -1682,8 +1735,8 @@ struct MemoryOptimizationPhase {
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
- ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
- : MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
+ ? MemoryLowering::AllocationFolding::kDoAllocationFolding
+ : MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
@@ -1705,13 +1758,15 @@ struct LateOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
- SelectLowering select_lowering(data->jsgraph()->graph(),
- data->jsgraph()->common());
-#ifdef V8_COMPRESS_POINTERS
+ SelectLowering select_lowering(data->jsgraph(), temp_zone);
+ // TODO(v8:7703, solanes): go back to using #if guards once
+ // FLAG_turbo_decompression_elimination gets removed.
DecompressionElimination decompression_elimination(
&graph_reducer, data->graph(), data->machine(), data->common());
- AddReducer(data, &graph_reducer, &decompression_elimination);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+ }
+ USE(decompression_elimination);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1738,6 +1793,23 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct MidTierMachineLoweringPhase {
+ static const char* phase_name() { return "V8.TFMidTierMachineLoweringPhase"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ SelectLowering select_lowering(data->jsgraph(), temp_zone);
+ MemoryLowering memory_lowering(data->jsgraph(), temp_zone,
+ data->info()->GetPoisoningMitigationLevel());
+
+ AddReducer(data, &graph_reducer, &memory_lowering);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ graph_reducer.ReduceGraph();
+ }
+};
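The new phase follows the usual TurboFan pattern: construct a GraphReducer, register the individual reducers with AddReducer, and let ReduceGraph drive them until no reducer reports further changes. A much-simplified standalone model of that pattern is sketched below; MiniNode, MiniReducer and ReduceGraph are illustrative stand-ins, not V8's classes.

#include <functional>
#include <string>
#include <vector>

struct MiniNode {
  std::string op;
};

// A reducer inspects one node and reports whether it changed it.
using MiniReducer = std::function<bool(MiniNode*)>;

// Apply every registered reducer to every node until nothing changes.
void ReduceGraph(std::vector<MiniNode>& nodes,
                 const std::vector<MiniReducer>& reducers) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (MiniNode& node : nodes) {
      for (const MiniReducer& reduce : reducers) {
        if (reduce(&node)) changed = true;
      }
    }
  }
}

int main() {
  std::vector<MiniNode> nodes = {{"Select"}, {"Allocate"}};
  std::vector<MiniReducer> reducers = {
      // Stand-in for SelectLowering: rewrite Select into a Phi.
      [](MiniNode* n) {
        if (n->op != "Select") return false;
        n->op = "Phi";
        return true;
      },
      // Stand-in for MemoryLowering: rewrite Allocate into a runtime call.
      [](MiniNode* n) {
        if (n->op != "Allocate") return false;
        n->op = "CallAllocate";
        return true;
      },
  };
  ReduceGraph(nodes, reducers);
}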
+
struct CsaEarlyOptimizationPhase {
static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
@@ -1779,11 +1851,14 @@ struct CsaOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
-#ifdef V8_COMPRESS_POINTERS
+ // TODO(v8:7703, solanes): go back to using #if guards once
+ // FLAG_turbo_decompression_elimination gets removed.
DecompressionElimination decompression_elimination(
&graph_reducer, data->graph(), data->machine(), data->common());
- AddReducer(data, &graph_reducer, &decompression_elimination);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+ }
+ USE(decompression_elimination);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -2077,7 +2152,7 @@ struct JumpThreadingPhase {
void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
ZoneVector<RpoNumber> result(temp_zone);
- if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+ if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
frame_at_start)) {
JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
}
@@ -2102,7 +2177,7 @@ struct FinalizeCodePhase {
struct PrintGraphPhase {
- static const char* phase_name() { return nullptr; }
+ static const char* phase_name() { return "V8.TFPrintGraph"; }
void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
OptimizedCompilationInfo* info = data->info();
@@ -2143,7 +2218,7 @@ struct PrintGraphPhase {
struct VerifyGraphPhase {
- static const char* phase_name() { return nullptr; }
+ static const char* phase_name() { return "V8.TFVerifyGraph"; }
void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
bool values_only = false) {
@@ -2176,10 +2251,10 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
}
}
-bool PipelineImpl::CreateGraph() {
+void PipelineImpl::Serialize() {
PipelineData* data = this->data_;
- data->BeginPhaseKind("V8.TFGraphCreation");
+ data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
if (info()->trace_turbo_json_enabled() ||
info()->trace_turbo_graph_enabled()) {
@@ -2203,15 +2278,19 @@ bool PipelineImpl::CreateGraph() {
if (FLAG_concurrent_inlining) {
Run<HeapBrokerInitializationPhase>();
Run<SerializationPhase>();
+ data->broker()->StopSerializing();
}
+ data->EndPhaseKind();
+}
+
+bool PipelineImpl::CreateGraph() {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("V8.TFGraphCreation");
Run<GraphBuilderPhase>();
RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
- if (FLAG_concurrent_inlining) {
- Run<CopyMetadataForConcurrentCompilePhase>();
- }
-
// Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify(InliningPhase::phase_name(), true);
@@ -2222,12 +2301,13 @@ bool PipelineImpl::CreateGraph() {
// Determine the Typer operation flags.
{
- if (is_sloppy(info()->shared_info()->language_mode()) &&
- info()->shared_info()->IsUserJavaScript()) {
+ SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info());
+ if (is_sloppy(shared_info.language_mode()) &&
+ shared_info.IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
data->AddTyperFlag(Typer::kThisIsReceiver);
}
- if (IsClassConstructor(info()->shared_info()->kind())) {
+ if (IsClassConstructor(shared_info.kind())) {
// Class constructors cannot be [[Call]]ed.
data->AddTyperFlag(Typer::kNewTargetIsReceiver);
}
@@ -2235,12 +2315,7 @@ bool PipelineImpl::CreateGraph() {
// Run the type-sensitive lowerings and optimizations on the graph.
{
- if (FLAG_concurrent_inlining) {
- // TODO(neis): Remove CopyMetadataForConcurrentCompilePhase call once
- // brokerization of JSNativeContextSpecialization is complete.
- Run<CopyMetadataForConcurrentCompilePhase>();
- data->broker()->StopSerializing();
- } else {
+ if (!FLAG_concurrent_inlining) {
Run<HeapBrokerInitializationPhase>();
Run<CopyMetadataForConcurrentCompilePhase>();
data->broker()->StopSerializing();
@@ -2359,6 +2434,70 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
return SelectInstructions(linkage);
}
+bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("V8.TFLowering");
+
+ // Type the graph and keep the Typer running such that new nodes get
+ // automatically typed when they are created.
+ Run<TyperPhase>(data->CreateTyper());
+ RunPrintAndVerify(TyperPhase::phase_name());
+ Run<TypedLoweringPhase>();
+ RunPrintAndVerify(TypedLoweringPhase::phase_name());
+
+  // TODO(9684): Consider rolling this into the preceding phase or not creating
+ // LoopExit nodes at all.
+ Run<LoopExitEliminationPhase>();
+ RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
+
+ data->DeleteTyper();
+
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
+ // Perform simplified lowering. This has to run w/o the Typer decorator,
+ // because we cannot compute meaningful types anyways, and the computed types
+ // might even conflict with the representation/truncation logic.
+ Run<SimplifiedLoweringPhase>();
+ RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
+
+ // From now on it is invalid to look at types on the nodes, because the types
+ // on the nodes might not make sense after representation selection due to the
+ // way we handle truncations; if we'd want to look at types afterwards we'd
+ // essentially need to re-type (large portions of) the graph.
+
+ // In order to catch bugs related to type access after this point, we now
+ // remove the types from the nodes (currently only in Debug builds).
+#ifdef DEBUG
+ Run<UntyperPhase>();
+ RunPrintAndVerify(UntyperPhase::phase_name(), true);
+#endif
+
+ // Run generic lowering pass.
+ Run<GenericLoweringPhase>();
+ RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
+
+ data->BeginPhaseKind("V8.TFBlockBuilding");
+
+ Run<EffectControlLinearizationPhase>();
+ RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
+
+ Run<MidTierMachineLoweringPhase>();
+ RunPrintAndVerify(MidTierMachineLoweringPhase::phase_name(), true);
+
+ data->source_positions()->RemoveDecorator();
+ if (data->info()->trace_turbo_json_enabled()) {
+ data->node_origins()->RemoveDecorator();
+ }
+
+ ComputeScheduledGraph();
+
+ return SelectInstructions(linkage);
+}
+
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
SourcePositionTable* source_positions, Code::Kind kind,
@@ -2571,6 +2710,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
+ pipeline.Serialize();
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
pipeline.AssembleCode(&linkage);
@@ -2628,7 +2768,7 @@ std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
+ return std::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
@@ -2709,7 +2849,7 @@ void Pipeline::GenerateCodeForWasmFunction(
if (!pipeline.SelectInstructions(&linkage)) return;
pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());
- auto result = base::make_unique<wasm::WasmCompilationResult>();
+ auto result = std::make_unique<wasm::WasmCompilationResult>();
CodeGenerator* code_generator = pipeline.code_generator();
code_generator->tasm()->GetCode(
nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
@@ -2818,7 +2958,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
<< "--- End of " << data->debug_name() << " generated by TurboFan\n"
<< "--------------------------------------------------\n";
}
- Zone temp_zone(data->allocator(), ZONE_NAME);
+ Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
MachineGraphVerifier::Run(
data->graph(), data->schedule(), linkage,
data->info()->IsNotOptimizedFunctionOrWasmFunction(),
@@ -2993,6 +3133,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
PipelineData* data = this->data_;
+ data->BeginPhaseKind("V8.TFFinalizeCode");
if (data->broker() && retire_broker) {
data->broker()->Retire();
}
@@ -3007,7 +3148,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
if (data->profiler_data()) {
#ifdef ENABLE_DISASSEMBLER
std::ostringstream os;
- code->Disassemble(nullptr, os);
+ code->Disassemble(nullptr, os, isolate());
data->profiler_data()->SetCode(&os);
#endif // ENABLE_DISASSEMBLER
}
@@ -3023,7 +3164,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
<< "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
- code->Disassemble(nullptr, disassembly_stream);
+ code->Disassemble(nullptr, disassembly_stream, isolate());
std::string disassembly_string(disassembly_stream.str());
for (const auto& c : disassembly_string) {
json_of << AsEscapedUC16ForJSON(c);
@@ -3043,6 +3184,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
<< "Finished compiling method " << info()->GetDebugName().get()
<< " using TurboFan" << std::endl;
}
+ data->EndPhaseKind();
return code;
}
@@ -3100,7 +3242,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME));
+ verifier_zone.reset(
+ new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 3707bfb06e..42f31472a9 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_PIPELINE_H_
#define V8_COMPILER_PIPELINE_H_
+#include <memory>
+
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/common/globals.h"
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 17829863de..1d1ee538d8 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -18,7 +18,10 @@ class ElementAccessFeedback;
class ForInFeedback;
class GlobalAccessFeedback;
class InstanceOfFeedback;
+class LiteralFeedback;
class NamedAccessFeedback;
+class RegExpLiteralFeedback;
+class TemplateObjectFeedback;
class ProcessedFeedback : public ZoneObject {
public:
@@ -31,7 +34,10 @@ class ProcessedFeedback : public ZoneObject {
kForIn,
kGlobalAccess,
kInstanceOf,
+ kLiteral,
kNamedAccess,
+ kRegExpLiteral,
+ kTemplateObject,
};
Kind kind() const { return kind_; }
@@ -46,6 +52,9 @@ class ProcessedFeedback : public ZoneObject {
GlobalAccessFeedback const& AsGlobalAccess() const;
InstanceOfFeedback const& AsInstanceOf() const;
NamedAccessFeedback const& AsNamedAccess() const;
+ LiteralFeedback const& AsLiteral() const;
+ RegExpLiteralFeedback const& AsRegExpLiteral() const;
+ TemplateObjectFeedback const& AsTemplateObject() const;
protected:
ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind);
@@ -187,7 +196,9 @@ class SingleValueFeedback : public ProcessedFeedback {
(K == kBinaryOperation && slot_kind == FeedbackSlotKind::kBinaryOp) ||
(K == kCompareOperation && slot_kind == FeedbackSlotKind::kCompareOp) ||
(K == kForIn && slot_kind == FeedbackSlotKind::kForIn) ||
- (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf));
+ (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf) ||
+ ((K == kLiteral || K == kRegExpLiteral || K == kTemplateObject) &&
+ slot_kind == FeedbackSlotKind::kLiteral));
}
T value() const { return value_; }
@@ -202,6 +213,24 @@ class InstanceOfFeedback
using SingleValueFeedback::SingleValueFeedback;
};
+class LiteralFeedback
+ : public SingleValueFeedback<AllocationSiteRef,
+ ProcessedFeedback::kLiteral> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
+class RegExpLiteralFeedback
+ : public SingleValueFeedback<JSRegExpRef,
+ ProcessedFeedback::kRegExpLiteral> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
+class TemplateObjectFeedback
+ : public SingleValueFeedback<JSArrayRef,
+ ProcessedFeedback::kTemplateObject> {
+ using SingleValueFeedback::SingleValueFeedback;
+};
+
class BinaryOperationFeedback
: public SingleValueFeedback<BinaryOperationHint,
ProcessedFeedback::kBinaryOperation> {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index e399b9c4f6..c709729081 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -690,15 +690,14 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* call_descriptor,
return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
- int input_count, Node* const* inputs) {
+void RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
// +1 is for target.
DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
Node* tail_call =
MakeNode(common()->TailCall(call_descriptor), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
- return tail_call;
}
namespace {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 46940df44f..cbbb719d54 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -131,7 +131,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
std::pair<MachineType, const Operator*> InsertDecompressionIfNeeded(
MachineType type) {
const Operator* decompress_op = nullptr;
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
switch (type.representation()) {
case MachineRepresentation::kTaggedPointer:
type = MachineType::CompressedPointer();
@@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
std::pair<MachineRepresentation, Node*> InsertCompressionIfNeeded(
MachineRepresentation rep, Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
switch (rep) {
case MachineRepresentation::kTaggedPointer:
rep = MachineRepresentation::kCompressedPointer;
@@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
object, value);
}
void OptimizedStoreMap(Node* object, Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
DCHECK(AccessBuilder::ForMap().machine_type.IsCompressedPointer());
value =
AddNode(machine()->ChangeTaggedPointerToCompressedPointer(), value);
@@ -736,8 +736,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* BitcastTaggedToWord(Node* a) {
return AddNode(machine()->BitcastTaggedToWord(), a);
}
- Node* BitcastTaggedSignedToWord(Node* a) {
- return AddNode(machine()->BitcastTaggedSignedToWord(), a);
+ Node* BitcastTaggedToWordForTagAndSmiBits(Node* a) {
+ return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a);
}
Node* BitcastMaybeObjectToWord(Node* a) {
return AddNode(machine()->BitcastMaybeObjectToWord(), a);
@@ -965,8 +965,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Tail call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* TailCallN(CallDescriptor* call_descriptor, int input_count,
- Node* const* inputs);
+ void TailCallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Type representing C function argument with type info.
using CFunctionArg = std::pair<MachineType, Node*>;
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index fd0cbabe66..ca1b1e221f 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -1272,8 +1272,13 @@ Node* RepresentationChanger::GetBitRepresentationFor(
}
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
- node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
- jsgraph()->IntPtrConstant(0));
+ if (COMPRESS_POINTERS_BOOL) {
+ node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
+ } else {
+ node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
+ jsgraph()->IntPtrConstant(0));
+ }
return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
jsgraph()->Int32Constant(0));
} else if (output_rep == MachineRepresentation::kCompressed) {
@@ -1546,14 +1551,17 @@ const Operator* RepresentationChanger::TaggedSignedOperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kSpeculativeNumberLessThan:
- return machine()->Is32() ? machine()->Int32LessThan()
- : machine()->Int64LessThan();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Int32LessThan()
+ : machine()->Int64LessThan();
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- return machine()->Is32() ? machine()->Int32LessThanOrEqual()
- : machine()->Int64LessThanOrEqual();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Int32LessThanOrEqual()
+ : machine()->Int64LessThanOrEqual();
case IrOpcode::kSpeculativeNumberEqual:
- return machine()->Is32() ? machine()->Word32Equal()
- : machine()->Word64Equal();
+ return (COMPRESS_POINTERS_BOOL || machine()->Is32())
+ ? machine()->Word32Equal()
+ : machine()->Word64Equal();
default:
UNREACHABLE();
}
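The rationale for preferring the 32-bit operators when COMPRESS_POINTERS_BOOL is set: with pointer compression a Smi lives entirely in the low 32 bits, tagged as value * 2 with a zero tag bit, and that tagging is monotone for 31-bit payloads, so a signed 32-bit comparison of the tagged words agrees with a comparison of the untagged values. The snippet below is a standalone illustration of that arithmetic in plain C++, not V8's Smi class; 31-bit payloads are assumed.

#include <cassert>
#include <cstdint>

// Tag a 31-bit signed payload as a Smi: shift left by one, tag bit is zero.
// Written as * 2 so it stays well-defined for negative payloads.
constexpr int32_t SmiTag(int32_t payload) { return payload * 2; }

int main() {
  const int32_t samples[] = {-1073741824, -17, -1, 0, 1, 42, 1073741823};
  for (int32_t a : samples) {
    for (int32_t b : samples) {
      // Int32LessThan / Word32Equal on the tagged words give the same answer
      // as comparing the untagged payloads, because tagging is monotone and
      // never overflows for 31-bit payloads.
      assert((SmiTag(a) < SmiTag(b)) == (a < b));
      assert((SmiTag(a) == SmiTag(b)) == (a == b));
    }
  }
  return 0;
}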
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index bf23e436f6..2999cbfcd6 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -6,7 +6,7 @@
#include <iomanip>
-#include "src/base/adapters.h"
+#include "src/base/iterator.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index 4d5bb99053..290306a966 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -14,29 +14,39 @@ namespace v8 {
namespace internal {
namespace compiler {
-SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
- : common_(common), graph_(graph) {}
+SelectLowering::SelectLowering(JSGraph* jsgraph, Zone* zone)
+ : graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ start_(jsgraph->graph()->start()) {}
SelectLowering::~SelectLowering() = default;
-
Reduction SelectLowering::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kSelect) return NoChange();
+ return Changed(LowerSelect(node));
+}
+
+#define __ gasm()->
+
+Node* SelectLowering::LowerSelect(Node* node) {
SelectParameters const p = SelectParametersOf(node->op());
- Node* cond = node->InputAt(0);
- Node* vthen = node->InputAt(1);
- Node* velse = node->InputAt(2);
-
- // Create a diamond and a phi.
- Diamond d(graph(), common(), cond, p.hint());
- node->ReplaceInput(0, vthen);
- node->ReplaceInput(1, velse);
- node->ReplaceInput(2, d.merge);
- NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
- return Changed(node);
+ Node* condition = node->InputAt(0);
+ Node* vtrue = node->InputAt(1);
+ Node* vfalse = node->InputAt(2);
+
+ gasm()->Reset(start(), start());
+
+ auto done = __ MakeLabel(p.representation());
+
+ __ GotoIf(condition, &done, vtrue);
+ __ Goto(&done, vfalse);
+ __ Bind(&done);
+
+ return done.PhiAt(0);
}
+#undef __
+
} // namespace compiler
} // namespace internal
} // namespace v8
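What the GotoIf/Goto/Bind sequence above builds is the classic diamond: a Branch on the condition, IfTrue and IfFalse projections, a Merge, and a Phi selecting between the two values. Below is a tiny standalone model of that shape; MiniGraph and its string-named node kinds are illustrative only, not TurboFan's Graph/Node API.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct MiniNode {
  std::string op;
  std::vector<MiniNode*> inputs;
};

struct MiniGraph {
  std::vector<std::unique_ptr<MiniNode>> nodes;
  MiniNode* NewNode(std::string op, std::vector<MiniNode*> inputs = {}) {
    nodes.push_back(
        std::make_unique<MiniNode>(MiniNode{std::move(op), std::move(inputs)}));
    return nodes.back().get();
  }
};

// Select(cond, vtrue, vfalse) becomes:
//   Branch(cond) -> IfTrue / IfFalse -> Merge, plus Phi(vtrue, vfalse, Merge).
MiniNode* LowerSelect(MiniGraph* g, MiniNode* cond, MiniNode* vtrue,
                      MiniNode* vfalse, MiniNode* control) {
  MiniNode* branch = g->NewNode("Branch", {cond, control});
  MiniNode* if_true = g->NewNode("IfTrue", {branch});
  MiniNode* if_false = g->NewNode("IfFalse", {branch});
  MiniNode* merge = g->NewNode("Merge", {if_true, if_false});
  return g->NewNode("Phi", {vtrue, vfalse, merge});
}

int main() {
  MiniGraph g;
  MiniNode* start = g.NewNode("Start");
  MiniNode* phi = LowerSelect(&g, g.NewNode("Cond"), g.NewNode("A"),
                              g.NewNode("B"), start);
  std::cout << phi->op << " over " << phi->inputs.size() << " inputs\n";
}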
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index d8c12d4d54..53890a7898 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -5,33 +5,31 @@
#ifndef V8_COMPILER_SELECT_LOWERING_H_
#define V8_COMPILER_SELECT_LOWERING_H_
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Forward declarations.
-class CommonOperatorBuilder;
-class Graph;
-
-
// Lowers Select nodes to diamonds.
class SelectLowering final : public Reducer {
public:
- SelectLowering(Graph* graph, CommonOperatorBuilder* common);
+ SelectLowering(JSGraph* jsgraph, Zone* zone);
~SelectLowering() override;
const char* reducer_name() const override { return "SelectLowering"; }
Reduction Reduce(Node* node) override;
+ Node* LowerSelect(Node* node);
+
private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() const { return graph_; }
+ GraphAssembler* gasm() { return &graph_assembler_; }
+ Node* start() { return start_; }
- CommonOperatorBuilder* common_;
- Graph* graph_;
+ GraphAssembler graph_assembler_;
+ Node* start_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 20d405b775..0391e8742d 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -10,7 +10,9 @@
#include "src/compiler/access-info.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/functional-list.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/zone-stats.h"
#include "src/handles/handles-inl.h"
#include "src/ic/call-optimization.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -41,7 +43,6 @@ namespace compiler {
V(CallRuntime) \
V(CloneObject) \
V(CreateArrayFromIterable) \
- V(CreateEmptyArrayLiteral) \
V(CreateEmptyObjectLiteral) \
V(CreateMappedArguments) \
V(CreateRestParameter) \
@@ -160,6 +161,7 @@ namespace compiler {
V(CreateBlockContext) \
V(CreateCatchContext) \
V(CreateClosure) \
+ V(CreateEmptyArrayLiteral) \
V(CreateEvalContext) \
V(CreateFunctionContext) \
V(CreateObjectLiteral) \
@@ -230,13 +232,41 @@ namespace compiler {
UNCONDITIONAL_JUMPS_LIST(V) \
UNREACHABLE_BYTECODE_LIST(V)
-template <typename T>
-struct HandleComparator {
- bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
- return lhs.address() < rhs.address();
+template <typename T, typename EqualTo>
+class FunctionalSet {
+ public:
+ void Add(T const& elem, Zone* zone) {
+ for (auto const& l : data_) {
+ if (equal_to(l, elem)) return;
+ }
+ data_.PushFront(elem, zone);
+ }
+
+ bool Includes(FunctionalSet<T, EqualTo> const& other) const {
+ return std::all_of(other.begin(), other.end(), [&](T const& other_elem) {
+ return std::any_of(this->begin(), this->end(), [&](T const& this_elem) {
+ return equal_to(this_elem, other_elem);
+ });
+ });
}
+
+ bool IsEmpty() const { return data_.begin() == data_.end(); }
+
+ void Clear() { data_.Clear(); }
+
+ using iterator = typename FunctionalList<T>::iterator;
+
+ iterator begin() const { return data_.begin(); }
+ iterator end() const { return data_.end(); }
+
+ private:
+ static EqualTo equal_to;
+ FunctionalList<T> data_;
};
+template <typename T, typename EqualTo>
+EqualTo FunctionalSet<T, EqualTo>::equal_to;
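FunctionalSet layers set semantics over FunctionalList: Add does a linear membership scan and then pushes onto a persistent, structurally shared list, and Includes is a quadratic subset check. The sketch below shows the same idea over an ordinary shared-node list in plain C++; it is not V8's FunctionalList and omits the Zone allocation.

#include <cassert>
#include <functional>
#include <memory>

// A persistent singly linked list: pushing to the front shares the old tail.
template <typename T>
struct PersistentList {
  struct Node {
    T value;
    std::shared_ptr<const Node> next;
  };
  std::shared_ptr<const Node> head;

  PersistentList PushFront(T value) const {
    return {std::make_shared<const Node>(Node{std::move(value), head})};
  }
};

// Set semantics on top: Add is a linear scan followed by PushFront.
template <typename T, typename EqualTo = std::equal_to<T>>
struct SmallFunctionalSet {
  PersistentList<T> data;
  EqualTo equal_to;

  void Add(const T& elem) {
    for (auto n = data.head; n; n = n->next) {
      if (equal_to(n->value, elem)) return;  // already present
    }
    data = data.PushFront(elem);
  }

  bool Contains(const T& elem) const {
    for (auto n = data.head; n; n = n->next) {
      if (equal_to(n->value, elem)) return true;
    }
    return false;
  }
};

int main() {
  SmallFunctionalSet<int> set;
  set.Add(1);
  set.Add(2);
  set.Add(1);  // duplicate, ignored
  assert(set.Contains(1) && set.Contains(2) && !set.Contains(3));
}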
+
struct VirtualContext {
unsigned int distance;
Handle<Context> context;
@@ -245,21 +275,22 @@ struct VirtualContext {
: distance(distance_in), context(context_in) {
CHECK_GT(distance, 0);
}
- bool operator<(const VirtualContext& other) const {
- return HandleComparator<Context>()(context, other.context) &&
- distance < other.distance;
+ bool operator==(const VirtualContext& other) const {
+ return context.equals(other.context) && distance == other.distance;
}
};
class FunctionBlueprint;
-using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
-using VirtualContextsSet = ZoneSet<VirtualContext>;
-using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
-using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>;
+using VirtualContextsSet =
+ FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>;
+using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>;
+using BlueprintsSet =
+ FunctionalSet<FunctionBlueprint, std::equal_to<FunctionBlueprint>>;
class Hints {
public:
- explicit Hints(Zone* zone);
+ Hints() = default;
static Hints SingleConstant(Handle<Object> constant, Zone* zone);
@@ -268,12 +299,13 @@ class Hints {
const BlueprintsSet& function_blueprints() const;
const VirtualContextsSet& virtual_contexts() const;
- void AddConstant(Handle<Object> constant);
- void AddMap(Handle<Map> map);
- void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
- void AddVirtualContext(VirtualContext virtual_context);
+ void AddConstant(Handle<Object> constant, Zone* zone);
+ void AddMap(Handle<Map> map, Zone* zone);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint, Zone* zone);
+ void AddVirtualContext(VirtualContext virtual_context, Zone* zone);
- void Add(const Hints& other);
+ void Add(const Hints& other, Zone* zone);
+ void AddFromChildSerializer(const Hints& other, Zone* zone);
void Clear();
bool IsEmpty() const;
@@ -292,6 +324,8 @@ class Hints {
using HintsVector = ZoneVector<Hints>;
+// A FunctionBlueprint is a SharedFunctionInfo and a FeedbackVector, plus
+// Hints about the context in which a closure will be created from them.
class FunctionBlueprint {
public:
FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
@@ -304,13 +338,23 @@ class FunctionBlueprint {
Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
const Hints& context_hints() const { return context_hints_; }
- bool operator<(const FunctionBlueprint& other) const {
- // A feedback vector is never used for more than one SFI, so it can
- // be used for strict ordering of blueprints.
+ bool operator==(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI. Moreover, we can
+ // never have two blueprints with identical feedback vector (and SFI) but
+ // different hints, because:
+ // (1) A blueprint originates either (i) from the data associated with a
+ // CreateClosure bytecode, in which case two different CreateClosure
+ // bytecodes never have the same feedback vector, or (ii) from a
+ // JSFunction, in which case the hints are determined by the closure.
+ // (2) We never extend a blueprint's hints after construction.
+ //
+ // It is therefore sufficient to look at the feedback vector in order to
+ // decide equality.
DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
shared_.equals(other.shared_));
- return HandleComparator<FeedbackVector>()(feedback_vector_,
- other.feedback_vector_);
+ SLOW_DCHECK(!feedback_vector_.equals(other.feedback_vector_) ||
+ context_hints_.Equals(other.context_hints_));
+ return feedback_vector_.equals(other.feedback_vector_);
}
private:
@@ -319,6 +363,8 @@ class FunctionBlueprint {
Hints context_hints_;
};
+// A CompilationSubject is a FunctionBlueprint, optionally with a matching
+// closure.
class CompilationSubject {
public:
explicit CompilationSubject(FunctionBlueprint blueprint)
@@ -336,24 +382,65 @@ class CompilationSubject {
MaybeHandle<JSFunction> closure_;
};
+// A Callee is either a JSFunction (which may not have a feedback vector), or a
+// FunctionBlueprint. Note that this is different from CompilationSubject, which
+// always has a FunctionBlueprint.
+class Callee {
+ public:
+ explicit Callee(Handle<JSFunction> jsfunction) : jsfunction_(jsfunction) {}
+ explicit Callee(FunctionBlueprint const& blueprint) : blueprint_(blueprint) {}
+
+ Handle<SharedFunctionInfo> shared(Isolate* isolate) const {
+ return blueprint_.has_value()
+ ? blueprint_->shared()
+ : handle(jsfunction_.ToHandleChecked()->shared(), isolate);
+ }
+
+ bool HasFeedbackVector() const {
+ Handle<JSFunction> function;
+ return blueprint_.has_value() ||
+ jsfunction_.ToHandleChecked()->has_feedback_vector();
+ }
+
+ CompilationSubject ToCompilationSubject(Isolate* isolate, Zone* zone) const {
+ CHECK(HasFeedbackVector());
+ return blueprint_.has_value()
+ ? CompilationSubject(*blueprint_)
+ : CompilationSubject(jsfunction_.ToHandleChecked(), isolate,
+ zone);
+ }
+
+ private:
+ MaybeHandle<JSFunction> const jsfunction_;
+ base::Optional<FunctionBlueprint> const blueprint_;
+};
+
+// If a list of arguments (hints) is shorter than the function's parameter
+// count, this enum expresses what we know about the missing arguments.
+enum MissingArgumentsPolicy {
+ kMissingArgumentsAreUndefined, // ... as in the JS undefined value
+ kMissingArgumentsAreUnknown,
+};
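One way to read the enum: when a call site passes fewer arguments than the callee declares, the serializer either knows the rest will be the undefined value (ordinary under-application) or knows nothing about them, for instance when the argument count itself is not statically known, as with spread calls. The following is a hypothetical standalone sketch of applying such a policy to an argument-hints vector; PadArguments and the string-based ArgHints stand-in are illustrative, not the real serializer types.

#include <string>
#include <vector>

enum MissingArgumentsPolicy {
  kMissingArgumentsAreUndefined,
  kMissingArgumentsAreUnknown,
};

// Stand-in for a per-argument hint set.
using ArgHints = std::vector<std::string>;

// Pad {args} up to {parameter_count} entries according to {padding}.
std::vector<ArgHints> PadArguments(std::vector<ArgHints> args,
                                   size_t parameter_count,
                                   MissingArgumentsPolicy padding) {
  while (args.size() < parameter_count) {
    if (padding == kMissingArgumentsAreUndefined) {
      args.push_back({"constant:undefined"});  // known to be the JS undefined
    } else {
      args.push_back({});  // nothing is known about this argument
    }
  }
  return args;
}

int main() {
  std::vector<ArgHints> args = {{"constant:42"}};
  auto padded = PadArguments(args, 3, kMissingArgumentsAreUndefined);
  return padded.size() == 3 ? 0 : 1;
}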
+
// The SerializerForBackgroundCompilation makes sure that the relevant function
// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
// optimizations in the compiler, is copied to the heap broker.
class SerializerForBackgroundCompilation {
public:
SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
Hints Run(); // NOTE: Returns empty for an already-serialized function.
class Environment;
private:
SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments,
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding,
SerializerForBackgroundCompilationFlags flags);
bool BailoutOnUninitialized(ProcessedFeedback const& feedback);
@@ -365,36 +452,39 @@ class SerializerForBackgroundCompilation {
SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
- // Returns whether the callee with the given SFI should be processed further,
- // i.e. whether it's inlineable.
- bool ProcessSFIForCallOrConstruct(Handle<SharedFunctionInfo> shared,
+ void ProcessSFIForCallOrConstruct(Callee const& callee,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
- // Returns whether {function} should be serialized for compilation.
- bool ProcessCalleeForCallOrConstruct(Handle<JSFunction> function,
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
+ void ProcessCalleeForCallOrConstruct(Handle<Object> callee,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
const HintsVector& arguments, FeedbackSlot slot,
- bool with_spread = false);
- void ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
- Hints const& callee, interpreter::Register first_reg,
- int reg_count, FeedbackSlot slot,
- bool with_spread = false);
+ MissingArgumentsPolicy padding);
+ void ProcessCallVarArgs(
+ ConvertReceiverMode receiver_mode, Hints const& callee,
+ interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
+ MissingArgumentsPolicy padding = kMissingArgumentsAreUndefined);
void ProcessApiCall(Handle<SharedFunctionInfo> target,
const HintsVector& arguments);
void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target,
Handle<Map> receiver);
void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
+ base::Optional<Hints> new_target,
const HintsVector& arguments,
- SpeculationMode speculation_mode);
+ SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding);
void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
FeedbackSlot slot, AccessMode access_mode,
bool honor_bailout_on_uninitialized);
- void ProcessNamedPropertyAccess(Hints receiver, NameRef const& name,
+ void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
FeedbackSlot slot, AccessMode access_mode);
void ProcessNamedAccess(Hints receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* new_accumulator_hints);
@@ -411,7 +501,6 @@ class SerializerForBackgroundCompilation {
void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints);
void ProcessHintsForRegExpTest(Hints const& regexp_hints);
PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
- void ProcessHintsForFunctionCall(Hints const& target_hints);
void ProcessHintsForFunctionBind(Hints const& receiver_hints);
void ProcessHintsForObjectGetPrototype(Hints const& object_hints);
void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor,
@@ -456,7 +545,8 @@ class SerializerForBackgroundCompilation {
Hints RunChildSerializer(CompilationSubject function,
base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread);
+ const HintsVector& arguments,
+ MissingArgumentsPolicy padding);
// When (forward-)branching bytecodes are encountered, e.g. a conditional
// jump, we call ContributeToJumpTargetEnvironment to "remember" the current
@@ -475,14 +565,14 @@ class SerializerForBackgroundCompilation {
JSHeapBroker* broker() const { return broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() const { return zone_; }
+ Zone* zone() { return zone_scope_.zone(); }
Environment* environment() const { return environment_; }
SerializerForBackgroundCompilationFlags flags() const { return flags_; }
BailoutId osr_offset() const { return osr_offset_; }
JSHeapBroker* const broker_;
CompilationDependencies* const dependencies_;
- Zone* const zone_;
+ ZoneStats::Scope zone_scope_;
Environment* const environment_;
ZoneUnorderedMap<int, Environment*> jump_target_environments_;
SerializerForBackgroundCompilationFlags const flags_;
@@ -490,11 +580,11 @@ class SerializerForBackgroundCompilation {
};
void RunSerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset) {
- SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
- closure, flags, osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) {
+ SerializerForBackgroundCompilation serializer(
+ zone_stats, broker, dependencies, closure, flags, osr_offset);
serializer.Run();
}
@@ -505,14 +595,19 @@ FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
const Hints& context_hints)
: shared_(shared),
feedback_vector_(feedback_vector),
- context_hints_(context_hints) {}
+ context_hints_(context_hints) {
+ // The checked invariant rules out recursion and thus avoids complexity.
+ CHECK(context_hints_.function_blueprints().IsEmpty());
+}
FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
Isolate* isolate, Zone* zone)
: shared_(handle(function->shared(), isolate)),
- feedback_vector_(handle(function->feedback_vector(), isolate)),
- context_hints_(zone) {
- context_hints_.AddConstant(handle(function->context(), isolate));
+ feedback_vector_(function->feedback_vector(), isolate),
+ context_hints_() {
+ context_hints_.AddConstant(handle(function->context(), isolate), zone);
+ // The checked invariant rules out recursion and thus avoids complexity.
+ CHECK(context_hints_.function_blueprints().IsEmpty());
}
CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
@@ -521,25 +616,11 @@ CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
CHECK(closure->has_feedback_vector());
}
-Hints::Hints(Zone* zone)
- : virtual_contexts_(zone),
- constants_(zone),
- maps_(zone),
- function_blueprints_(zone) {}
-
#ifdef ENABLE_SLOW_DCHECKS
-namespace {
-template <typename K, typename Compare>
-bool SetIncludes(ZoneSet<K, Compare> const& lhs,
- ZoneSet<K, Compare> const& rhs) {
- return std::all_of(rhs.cbegin(), rhs.cend(),
- [&](K const& x) { return lhs.find(x) != lhs.cend(); });
-}
-} // namespace
bool Hints::Includes(Hints const& other) const {
- return SetIncludes(constants(), other.constants()) &&
- SetIncludes(function_blueprints(), other.function_blueprints()) &&
- SetIncludes(maps(), other.maps());
+ return constants().Includes(other.constants()) &&
+ function_blueprints().Includes(other.function_blueprints()) &&
+ maps().Includes(other.maps());
}
bool Hints::Equals(Hints const& other) const {
return this->Includes(other) && other.Includes(*this);
@@ -547,8 +628,8 @@ bool Hints::Equals(Hints const& other) const {
#endif
Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) {
- Hints result(zone);
- result.AddConstant(constant);
+ Hints result;
+ result.AddConstant(constant, zone);
return result;
}
@@ -564,30 +645,49 @@ const VirtualContextsSet& Hints::virtual_contexts() const {
return virtual_contexts_;
}
-void Hints::AddVirtualContext(VirtualContext virtual_context) {
- virtual_contexts_.insert(virtual_context);
+void Hints::AddVirtualContext(VirtualContext virtual_context, Zone* zone) {
+ virtual_contexts_.Add(virtual_context, zone);
}
-void Hints::AddConstant(Handle<Object> constant) {
- constants_.insert(constant);
+void Hints::AddConstant(Handle<Object> constant, Zone* zone) {
+ constants_.Add(constant, zone);
}
-void Hints::AddMap(Handle<Map> map) { maps_.insert(map); }
+void Hints::AddMap(Handle<Map> map, Zone* zone) { maps_.Add(map, zone); }
+
+void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint,
+ Zone* zone) {
+ function_blueprints_.Add(function_blueprint, zone);
+}
-void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint) {
- function_blueprints_.insert(function_blueprint);
+void Hints::Add(const Hints& other, Zone* zone) {
+ for (auto x : other.constants()) AddConstant(x, zone);
+ for (auto x : other.maps()) AddMap(x, zone);
+ for (auto x : other.function_blueprints()) AddFunctionBlueprint(x, zone);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone);
}
-void Hints::Add(const Hints& other) {
- for (auto x : other.constants()) AddConstant(x);
- for (auto x : other.maps()) AddMap(x);
- for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
- for (auto x : other.virtual_contexts()) AddVirtualContext(x);
+void Hints::AddFromChildSerializer(const Hints& other, Zone* zone) {
+ for (auto x : other.constants()) AddConstant(x, zone);
+ for (auto x : other.maps()) AddMap(x, zone);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone);
+
+ // Adding hints from a child serializer run means copying data out from
+ // a zone that's being destroyed. FunctionBlueprints have zone allocated
+ // data, so we've got to make a deep copy to eliminate traces of the
+ // dying zone.
+ for (auto x : other.function_blueprints()) {
+ Hints new_blueprint_hints;
+ new_blueprint_hints.AddFromChildSerializer(x.context_hints(), zone);
+ FunctionBlueprint new_blueprint(x.shared(), x.feedback_vector(),
+ new_blueprint_hints);
+ AddFunctionBlueprint(new_blueprint, zone);
+ }
}
bool Hints::IsEmpty() const {
- return constants().empty() && maps().empty() &&
- function_blueprints().empty() && virtual_contexts().empty();
+ return constants().IsEmpty() && maps().IsEmpty() &&
+ function_blueprints().IsEmpty() && virtual_contexts().IsEmpty();
}
std::ostream& operator<<(std::ostream& out,
@@ -625,10 +725,10 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) {
}
void Hints::Clear() {
- virtual_contexts_.clear();
- constants_.clear();
- maps_.clear();
- function_blueprints_.clear();
+ virtual_contexts_.Clear();
+ constants_.Clear();
+ maps_.Clear();
+ function_blueprints_.Clear();
DCHECK(IsEmpty());
}
@@ -636,7 +736,8 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
public:
Environment(Zone* zone, CompilationSubject function);
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments);
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding);
bool IsDead() const { return ephemeral_hints_.empty(); }
@@ -648,7 +749,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
void Revive() {
DCHECK(IsDead());
- ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone()));
+ ephemeral_hints_.resize(ephemeral_hints_size(), Hints());
DCHECK(!IsDead());
}
@@ -691,7 +792,6 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
int RegisterToLocalIndex(interpreter::Register reg) const;
- Zone* zone() const { return zone_; }
int parameter_count() const { return parameter_count_; }
int register_count() const { return register_count_; }
@@ -722,24 +822,25 @@ SerializerForBackgroundCompilation::Environment::Environment(
parameter_count_(
function_.shared()->GetBytecodeArray().parameter_count()),
register_count_(function_.shared()->GetBytecodeArray().register_count()),
- closure_hints_(zone),
- current_context_hints_(zone),
- return_value_hints_(zone),
- ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) {
+ closure_hints_(),
+ current_context_hints_(),
+ return_value_hints_(),
+ ephemeral_hints_(ephemeral_hints_size(), Hints(), zone) {
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
- closure_hints_.AddConstant(closure);
+ closure_hints_.AddConstant(closure, zone);
} else {
- closure_hints_.AddFunctionBlueprint(function.blueprint());
+ closure_hints_.AddFunctionBlueprint(function.blueprint(), zone);
}
// Consume blueprint context hint information.
- current_context_hints().Add(function.blueprint().context_hints());
+ current_context_hints().Add(function.blueprint().context_hints(), zone);
}
SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, Isolate* isolate, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments)
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding)
: Environment(zone, function) {
// Copy the hints for the actually passed arguments, at most up to
// the parameter_count.
@@ -748,11 +849,14 @@ SerializerForBackgroundCompilation::Environment::Environment(
ephemeral_hints_[i] = arguments[i];
}
- // Pad the rest with "undefined".
- Hints undefined_hint =
- Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
- for (size_t i = arguments.size(); i < param_count; ++i) {
- ephemeral_hints_[i] = undefined_hint;
+ if (padding == kMissingArgumentsAreUndefined) {
+ Hints undefined_hint =
+ Hints::SingleConstant(isolate->factory()->undefined_value(), zone);
+ for (size_t i = arguments.size(); i < param_count; ++i) {
+ ephemeral_hints_[i] = undefined_hint;
+ }
+ } else {
+ DCHECK_EQ(padding, kMissingArgumentsAreUnknown);
}
interpreter::Register new_target_reg =
@@ -762,7 +866,7 @@ SerializerForBackgroundCompilation::Environment::Environment(
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
if (new_target.has_value()) {
- register_hints(new_target_reg).Add(*new_target);
+ register_hints(new_target_reg).Add(*new_target, zone);
}
}
}
@@ -785,10 +889,10 @@ void SerializerForBackgroundCompilation::Environment::Merge(
CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size());
for (size_t i = 0; i < ephemeral_hints_.size(); ++i) {
- ephemeral_hints_[i].Add(other->ephemeral_hints_[i]);
+ ephemeral_hints_[i].Add(other->ephemeral_hints_[i], zone_);
}
- return_value_hints_.Add(other->return_value_hints_);
+ return_value_hints_.Add(other->return_value_hints_, zone_);
}
std::ostream& operator<<(
@@ -845,30 +949,33 @@ int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset)
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset)
: broker_(broker),
dependencies_(dependencies),
- zone_(zone),
- environment_(new (zone) Environment(
- zone, CompilationSubject(closure, broker_->isolate(), zone))),
- jump_target_environments_(zone),
+ zone_scope_(zone_stats, ZONE_NAME),
+ environment_(new (zone()) Environment(
+ zone(), CompilationSubject(closure, broker_->isolate(), zone()))),
+ jump_target_environments_(zone()),
flags_(flags),
osr_offset_(osr_offset) {
JSFunctionRef(broker, closure).Serialize();
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments, SerializerForBackgroundCompilationFlags flags)
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, CompilationSubject function,
+ base::Optional<Hints> new_target, const HintsVector& arguments,
+ MissingArgumentsPolicy padding,
+ SerializerForBackgroundCompilationFlags flags)
: broker_(broker),
dependencies_(dependencies),
- zone_(zone),
- environment_(new (zone) Environment(zone, broker_->isolate(), function,
- new_target, arguments)),
- jump_target_environments_(zone),
+ zone_scope_(zone_stats, ZONE_NAME),
+ environment_(new (zone())
+ Environment(zone(), broker_->isolate(), function,
+ new_target, arguments, padding)),
+ jump_target_environments_(zone()),
flags_(flags),
osr_offset_(BailoutId::None()) {
TraceScope tracer(
@@ -902,13 +1009,15 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
Hints SerializerForBackgroundCompilation::Run() {
TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
+ TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: "
+ << broker()->zone()->allocation_size());
SharedFunctionInfoRef shared(broker(), environment()->function().shared());
FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
if (shared.IsSerializedForCompilation(feedback_vector_ref)) {
TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
<< Brief(*shared.object())
<< ", bailing out.\n");
- return Hints(zone());
+ return Hints();
}
shared.SetSerializedForCompilation(feedback_vector_ref);
@@ -923,6 +1032,9 @@ Hints SerializerForBackgroundCompilation::Run() {
feedback_vector_ref.Serialize();
TraverseBytecode();
+
+ TRACE_BROKER_MEMORY(broker(), "[serializer end] Broker zone usage: "
+ << broker()->zone()->allocation_size());
return environment()->return_value_hints();
}
@@ -1036,12 +1148,19 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
void SerializerForBackgroundCompilation::VisitGetIterator(
BytecodeArrayIterator* iterator) {
- AccessMode mode = AccessMode::kLoad;
Hints const& receiver =
environment()->register_hints(iterator->GetRegisterOperand(0));
Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
- FeedbackSlot slot = iterator->GetSlotOperand(1);
- ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
+ FeedbackSlot load_slot = iterator->GetSlotOperand(1);
+ ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), load_slot,
+ AccessMode::kLoad);
+ if (environment()->IsDead()) return;
+
+ const Hints& callee = Hints();
+ FeedbackSlot call_slot = iterator->GetSlotOperand(2);
+ HintsVector parameters({receiver}, zone());
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, call_slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
@@ -1057,72 +1176,74 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
map.SerializePrototype();
ObjectRef proto = map.prototype();
if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
- environment()->register_hints(dst).AddConstant(proto.object());
+ environment()->register_hints(dst).AddConstant(proto.object(), zone());
}
}
}
void SerializerForBackgroundCompilation::VisitGetTemplateObject(
BytecodeArrayIterator* iterator) {
- ObjectRef description(
+ TemplateObjectDescriptionRef description(
broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(1);
- FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
+ FeedbackSource source(feedback_vector(), slot);
SharedFunctionInfoRef shared(broker(), environment()->function().shared());
- JSArrayRef template_object =
- shared.GetTemplateObject(description, feedback_vector_ref, slot,
- SerializationPolicy::kSerializeIfNeeded);
+ JSArrayRef template_object = shared.GetTemplateObject(
+ description, source, SerializationPolicy::kSerializeIfNeeded);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(template_object.object());
+ environment()->accumulator_hints().AddConstant(template_object.object(),
+ zone());
}
void SerializerForBackgroundCompilation::VisitLdaTrue(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->true_value());
+ broker()->isolate()->factory()->true_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaFalse(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->false_value());
+ broker()->isolate()->factory()->false_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaTheHole(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->the_hole_value());
+ broker()->isolate()->factory()->the_hole_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaUndefined(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->undefined_value());
+ broker()->isolate()->factory()->undefined_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaNull(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- broker()->isolate()->factory()->null_value());
+ broker()->isolate()->factory()->null_value(), zone());
}
void SerializerForBackgroundCompilation::VisitLdaZero(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- handle(Smi::FromInt(0), broker()->isolate()));
+ handle(Smi::FromInt(0), broker()->isolate()), zone());
}
void SerializerForBackgroundCompilation::VisitLdaSmi(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(handle(
- Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
+ environment()->accumulator_hints().AddConstant(
+ handle(Smi::FromInt(iterator->GetImmediateOperand(0)),
+ broker()->isolate()),
+ zone());
}
void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
@@ -1215,7 +1336,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
ObjectRef object(
broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().AddConstant(object.object());
+ environment()->accumulator_hints().AddConstant(object.object(), zone());
}
void SerializerForBackgroundCompilation::VisitPushContext(
@@ -1225,12 +1346,12 @@ void SerializerForBackgroundCompilation::VisitPushContext(
Hints& saved_context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
saved_context_hints.Clear();
- saved_context_hints.Add(current_context_hints);
+ saved_context_hints.Add(current_context_hints, zone());
// New context is in the accumulator. Put those hints into the current context
// register hints.
current_context_hints.Clear();
- current_context_hints.Add(environment()->accumulator_hints());
+ current_context_hints.Add(environment()->accumulator_hints(), zone());
}
void SerializerForBackgroundCompilation::VisitPopContext(
@@ -1239,7 +1360,7 @@ void SerializerForBackgroundCompilation::VisitPopContext(
Hints& new_context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
environment()->current_context_hints().Clear();
- environment()->current_context_hints().Add(new_context_hints);
+ environment()->current_context_hints().Add(new_context_hints, zone());
}
void SerializerForBackgroundCompilation::ProcessImmutableLoad(
@@ -1251,7 +1372,7 @@ void SerializerForBackgroundCompilation::ProcessImmutableLoad(
// If requested, record the object as a hint for the result value.
if (result_hints != nullptr && slot_value.has_value()) {
- result_hints->AddConstant(slot_value.value().object());
+ result_hints->AddConstant(slot_value.value().object(), zone());
}
}
@@ -1294,11 +1415,11 @@ void SerializerForBackgroundCompilation::VisitLdaContextSlot(
environment()->register_hints(iterator->GetRegisterOperand(0));
const int slot = iterator->GetIndexOperand(1);
const int depth = iterator->GetUnsignedImmediateOperand(2);
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
@@ -1306,11 +1427,11 @@ void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
const int slot = iterator->GetIndexOperand(0);
const int depth = 0;
Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
@@ -1319,11 +1440,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
const int depth = iterator->GetUnsignedImmediateOperand(2);
Hints const& context_hints =
environment()->register_hints(iterator->GetRegisterOperand(0));
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
@@ -1331,11 +1452,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
const int slot = iterator->GetIndexOperand(0);
const int depth = 0;
Hints const& context_hints = environment()->current_context_hints();
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&new_accumulator_hints);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
@@ -1344,7 +1465,7 @@ void SerializerForBackgroundCompilation::ProcessModuleVariableAccess(
const int depth = iterator->GetUnsignedImmediateOperand(1);
Hints const& context_hints = environment()->current_context_hints();
- Hints result_hints(zone());
+ Hints result_hints;
ProcessContextAccess(context_hints, slot, depth, kSerializeSlot,
&result_hints);
for (Handle<Object> constant : result_hints.constants()) {
@@ -1392,14 +1513,15 @@ void SerializerForBackgroundCompilation::VisitLdar(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().Add(
- environment()->register_hints(iterator->GetRegisterOperand(0)));
+ environment()->register_hints(iterator->GetRegisterOperand(0)), zone());
}
void SerializerForBackgroundCompilation::VisitStar(
BytecodeArrayIterator* iterator) {
interpreter::Register reg = iterator->GetRegisterOperand(0);
environment()->register_hints(reg).Clear();
- environment()->register_hints(reg).Add(environment()->accumulator_hints());
+ environment()->register_hints(reg).Add(environment()->accumulator_hints(),
+ zone());
}
void SerializerForBackgroundCompilation::VisitMov(
@@ -1407,7 +1529,8 @@ void SerializerForBackgroundCompilation::VisitMov(
interpreter::Register src = iterator->GetRegisterOperand(0);
interpreter::Register dst = iterator->GetRegisterOperand(1);
environment()->register_hints(dst).Clear();
- environment()->register_hints(dst).Add(environment()->register_hints(src));
+ environment()->register_hints(dst).Add(environment()->register_hints(src),
+ zone());
}
void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
@@ -1415,6 +1538,9 @@ void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
Handle<String> constant_pattern = Handle<String>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
StringRef description(broker(), constant_pattern);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForRegExpLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1425,6 +1551,17 @@ void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
ArrayBoilerplateDescriptionRef description(broker(),
array_boilerplate_description);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
+ environment()->accumulator_hints().Clear();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateEmptyArrayLiteral(
+ BytecodeArrayIterator* iterator) {
+ FeedbackSlot slot = iterator->GetSlotOperand(0);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1434,6 +1571,9 @@ void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
Handle<ObjectBoilerplateDescription>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
ObjectBoilerplateDescriptionRef description(broker(), constant_properties);
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackSource source(feedback_vector(), slot);
+ broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
environment()->accumulator_hints().Clear();
}
@@ -1490,7 +1630,8 @@ void SerializerForBackgroundCompilation::ProcessCreateContext(
for (auto x : current_context_hints.constants()) {
if (x->IsContext()) {
Handle<Context> as_context(Handle<Context>::cast(x));
- accumulator_hints.AddVirtualContext(VirtualContext(1, as_context));
+ accumulator_hints.AddVirtualContext(VirtualContext(1, as_context),
+ zone());
}
}
@@ -1498,7 +1639,7 @@ void SerializerForBackgroundCompilation::ProcessCreateContext(
// it of distance {existing distance} + 1.
for (auto x : current_context_hints.virtual_contexts()) {
accumulator_hints.AddVirtualContext(
- VirtualContext(x.distance + 1, x.context));
+ VirtualContext(x.distance + 1, x.context), zone());
}
}
@@ -1518,7 +1659,7 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
FunctionBlueprint blueprint(shared,
Handle<FeedbackVector>::cast(cell_value),
environment()->current_context_hints());
- environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
+ environment()->accumulator_hints().AddFunctionBlueprint(blueprint, zone());
}
}
@@ -1542,7 +1683,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
@@ -1556,7 +1698,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver, arg0}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
@@ -1572,7 +1715,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
Hints receiver = Hints::SingleConstant(
broker()->isolate()->factory()->undefined_value(), zone());
HintsVector parameters({receiver, arg0, arg1}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
@@ -1616,7 +1760,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty0(
FeedbackSlot slot = iterator->GetSlotOperand(2);
HintsVector parameters({receiver}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty1(
@@ -1630,7 +1775,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty1(
FeedbackSlot slot = iterator->GetSlotOperand(3);
HintsVector parameters({receiver, arg0}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallProperty2(
@@ -1646,7 +1792,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty2(
FeedbackSlot slot = iterator->GetSlotOperand(4);
HintsVector parameters({receiver, arg0, arg1}, zone());
- ProcessCallOrConstruct(callee, base::nullopt, parameters, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, parameters, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitCallWithSpread(
@@ -1657,7 +1804,7 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
FeedbackSlot slot = iterator->GetSlotOperand(3);
ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
- slot, true);
+ slot, kMissingArgumentsAreUnknown);
}
void SerializerForBackgroundCompilation::VisitCallJSRuntime(
@@ -1677,61 +1824,45 @@ void SerializerForBackgroundCompilation::VisitCallJSRuntime(
Hints SerializerForBackgroundCompilation::RunChildSerializer(
CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread) {
- if (with_spread) {
- DCHECK_LT(0, arguments.size());
- // Pad the missing arguments in case we were called with spread operator.
- // Drop the last actually passed argument, which contains the spread.
- // We don't know what the spread element produces. Therefore we pretend
- // that the function is called with the maximal number of parameters and
- // that we have no information about the parameters that were not
- // explicitly provided.
- HintsVector padded = arguments;
- padded.pop_back(); // Remove the spread element.
- // Fill the rest with empty hints.
- padded.resize(
- function.blueprint().shared()->GetBytecodeArray().parameter_count(),
- Hints(zone()));
- return RunChildSerializer(function, new_target, padded, false);
- }
-
+ const HintsVector& arguments, MissingArgumentsPolicy padding) {
SerializerForBackgroundCompilation child_serializer(
- broker(), dependencies(), zone(), function, new_target, arguments,
- flags());
- return child_serializer.Run();
-}
-
-bool SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct(
- Handle<SharedFunctionInfo> shared, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
+ zone_scope_.zone_stats(), broker(), dependencies(), function, new_target,
+ arguments, padding, flags());
+ // The Hints returned by the call to Run are allocated in the zone
+ // created by the child serializer. Adding those hints to a hints
+ // object created in our zone will preserve the information.
+ Hints hints;
+ hints.AddFromChildSerializer(child_serializer.Run(), zone());
+ return hints;
+}
+
+void SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct(
+ Callee const& callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
+ Handle<SharedFunctionInfo> shared = callee.shared(broker()->isolate());
if (shared->IsApiFunction()) {
ProcessApiCall(shared, arguments);
DCHECK(!shared->IsInlineable());
} else if (shared->HasBuiltinId()) {
- ProcessBuiltinCall(shared, arguments, speculation_mode);
+ ProcessBuiltinCall(shared, new_target, arguments, speculation_mode,
+ padding);
DCHECK(!shared->IsInlineable());
+ } else if (shared->IsInlineable() && callee.HasFeedbackVector()) {
+ CompilationSubject subject =
+ callee.ToCompilationSubject(broker()->isolate(), zone());
+ environment()->accumulator_hints().Add(
+ RunChildSerializer(subject, new_target, arguments, padding), zone());
}
- return shared->IsInlineable();
-}
-
-bool SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
- Handle<JSFunction> function, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
- JSFunctionRef(broker(), function).Serialize();
-
- Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
-
- return ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode) &&
- function->has_feedback_vector();
}
namespace {
-// Returns the innermost bound target, if it's a JSFunction and inserts
-// all bound arguments and {original_arguments} into {expanded_arguments}
-// in the appropriate order.
-MaybeHandle<JSFunction> UnrollBoundFunction(
- JSBoundFunctionRef const& bound_function, JSHeapBroker* broker,
- const HintsVector& original_arguments, HintsVector* expanded_arguments) {
+// Returns the innermost bound target and inserts all bound arguments and
+// {original_arguments} into {expanded_arguments} in the appropriate order.
+JSReceiverRef UnrollBoundFunction(JSBoundFunctionRef const& bound_function,
+ JSHeapBroker* broker,
+ const HintsVector& original_arguments,
+ HintsVector* expanded_arguments) {
DCHECK(expanded_arguments->empty());
JSReceiverRef target = bound_function.AsJSReceiver();
@@ -1750,8 +1881,6 @@ MaybeHandle<JSFunction> UnrollBoundFunction(
reversed_bound_arguments.push_back(arg);
}
- if (!target.IsJSFunction()) return MaybeHandle<JSFunction>();
-
expanded_arguments->insert(expanded_arguments->end(),
reversed_bound_arguments.rbegin(),
reversed_bound_arguments.rend());
@@ -1759,13 +1888,38 @@ MaybeHandle<JSFunction> UnrollBoundFunction(
original_arguments.begin(),
original_arguments.end());
- return target.AsJSFunction().object();
+ return target;
}
} // namespace
+void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
+ Handle<Object> callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
+ const HintsVector* actual_arguments = &arguments;
+ HintsVector expanded_arguments(zone());
+ if (callee->IsJSBoundFunction()) {
+ JSBoundFunctionRef bound_function(broker(),
+ Handle<JSBoundFunction>::cast(callee));
+ bound_function.Serialize();
+ callee = UnrollBoundFunction(bound_function, broker(), arguments,
+ &expanded_arguments)
+ .object();
+ actual_arguments = &expanded_arguments;
+ }
+ if (!callee->IsJSFunction()) return;
+
+ JSFunctionRef function(broker(), Handle<JSFunction>::cast(callee));
+ function.Serialize();
+ Callee new_callee(function.object());
+ ProcessSFIForCallOrConstruct(new_callee, new_target, *actual_arguments,
+ speculation_mode, padding);
+}
+
void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
Hints callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, FeedbackSlot slot, bool with_spread) {
+ const HintsVector& arguments, FeedbackSlot slot,
+ MissingArgumentsPolicy padding) {
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation;
if (!slot.IsInvalid()) {
FeedbackSource source(feedback_vector(), slot);
@@ -1782,11 +1936,11 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
// site, and it may make sense to add the Array JSFunction constant.
if (new_target.has_value()) {
// Construct; feedback is new_target, which often is also the callee.
- new_target->AddConstant(target->object());
- callee.AddConstant(target->object());
+ new_target->AddConstant(target->object(), zone());
+ callee.AddConstant(target->object(), zone());
} else {
// Call; target is callee.
- callee.AddConstant(target->object());
+ callee.AddConstant(target->object(), zone());
}
}
}
@@ -1795,50 +1949,22 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
environment()->accumulator_hints().Clear();
// For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
- for (auto hint : callee.constants()) {
- const HintsVector* actual_arguments = &arguments;
- Handle<JSFunction> function;
- HintsVector expanded_arguments(zone());
- if (hint->IsJSBoundFunction()) {
- JSBoundFunctionRef bound_function(broker(),
- Handle<JSBoundFunction>::cast(hint));
- bound_function.Serialize();
-
- MaybeHandle<JSFunction> maybe_function = UnrollBoundFunction(
- bound_function, broker(), arguments, &expanded_arguments);
- if (maybe_function.is_null()) continue;
- function = maybe_function.ToHandleChecked();
- actual_arguments = &expanded_arguments;
- } else if (hint->IsJSFunction()) {
- function = Handle<JSFunction>::cast(hint);
- } else {
- continue;
- }
-
- if (ProcessCalleeForCallOrConstruct(function, *actual_arguments,
- speculation_mode)) {
- environment()->accumulator_hints().Add(RunChildSerializer(
- CompilationSubject(function, broker()->isolate(), zone()), new_target,
- *actual_arguments, with_spread));
- }
+ for (auto constant : callee.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, new_target, arguments,
+ speculation_mode, padding);
}
// For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct.
for (auto hint : callee.function_blueprints()) {
- Handle<SharedFunctionInfo> shared = hint.shared();
- if (!ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode)) {
- continue;
- }
-
- environment()->accumulator_hints().Add(RunChildSerializer(
- CompilationSubject(hint), new_target, arguments, with_spread));
+ ProcessSFIForCallOrConstruct(Callee(hint), new_target, arguments,
+ speculation_mode, padding);
}
}
void SerializerForBackgroundCompilation::ProcessCallVarArgs(
ConvertReceiverMode receiver_mode, Hints const& callee,
interpreter::Register first_reg, int reg_count, FeedbackSlot slot,
- bool with_spread) {
+ MissingArgumentsPolicy padding) {
HintsVector arguments(zone());
// The receiver is either given in the first register or it is implicitly
// the {undefined} value.
@@ -1848,7 +1974,7 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
}
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
- ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
+ ProcessCallOrConstruct(callee, base::nullopt, arguments, slot, padding);
}
void SerializerForBackgroundCompilation::ProcessApiCall(
@@ -1866,17 +1992,17 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
FunctionTemplateInfoRef target_template_info(
broker(), handle(target->function_data(), broker()->isolate()));
if (!target_template_info.has_call_code()) return;
-
target_template_info.SerializeCallCode();
SharedFunctionInfoRef target_ref(broker(), target);
target_ref.SerializeFunctionTemplateInfo();
if (target_template_info.accept_any_receiver() &&
- target_template_info.is_signature_undefined())
+ target_template_info.is_signature_undefined()) {
return;
+ }
- CHECK_GE(arguments.size(), 1);
+ if (arguments.empty()) return;
Hints const& receiver_hints = arguments[0];
for (auto hint : receiver_hints.constants()) {
if (hint->IsUndefined()) {
@@ -1920,8 +2046,9 @@ void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate(
}
void SerializerForBackgroundCompilation::ProcessBuiltinCall(
- Handle<SharedFunctionInfo> target, const HintsVector& arguments,
- SpeculationMode speculation_mode) {
+ Handle<SharedFunctionInfo> target, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SpeculationMode speculation_mode,
+ MissingArgumentsPolicy padding) {
DCHECK(target->HasBuiltinId());
const int builtin_id = target->builtin_id();
const char* name = Builtins::name(builtin_id);
@@ -1963,20 +2090,31 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
case Builtins::kPromiseResolveTrampoline:
// For JSCallReducer::ReducePromiseInternalResolve and
// JSNativeContextSpecialization::ReduceJSResolvePromise.
- if (arguments.size() >= 2) {
- Hints const& resolution_hints = arguments[1];
+ if (arguments.size() >= 1) {
+ Hints const& resolution_hints =
+ arguments.size() >= 2
+ ? arguments[1]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
ProcessHintsForPromiseResolve(resolution_hints);
}
break;
case Builtins::kPromiseInternalResolve:
// For JSCallReducer::ReducePromiseInternalResolve and
// JSNativeContextSpecialization::ReduceJSResolvePromise.
- if (arguments.size() >= 3) {
- Hints const& resolution_hints = arguments[2];
+ if (arguments.size() >= 2) {
+ Hints const& resolution_hints =
+ arguments.size() >= 3
+ ? arguments[2]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
ProcessHintsForPromiseResolve(resolution_hints);
}
break;
case Builtins::kRegExpPrototypeTest:
+ case Builtins::kRegExpPrototypeTestFast:
// For JSCallReducer::ReduceRegExpPrototypeTest.
if (arguments.size() >= 1 &&
speculation_mode != SpeculationMode::kDisallowSpeculation) {
@@ -1990,35 +2128,105 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayMap:
+ case Builtins::kArraySome:
+ if (arguments.size() >= 2 &&
+ speculation_mode != SpeculationMode::kDisallowSpeculation) {
+ Hints const& callback = arguments[1];
+ // "Call(callbackfn, T, « kValue, k, O »)"
+ HintsVector new_arguments(zone());
+ new_arguments.push_back(
+ arguments.size() < 3
+ ? Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone())
+ : arguments[2]); // T
+ new_arguments.push_back(Hints()); // kValue
+ new_arguments.push_back(Hints()); // k
+ new_arguments.push_back(arguments[0]); // O
+ for (auto constant : callback.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined);
+ }
+ }
+ break;
case Builtins::kArrayReduce:
case Builtins::kArrayReduceRight:
- case Builtins::kArraySome:
if (arguments.size() >= 2 &&
speculation_mode != SpeculationMode::kDisallowSpeculation) {
- Hints const& callback_hints = arguments[1];
- ProcessHintsForFunctionCall(callback_hints);
+ Hints const& callback = arguments[1];
+ // "Call(callbackfn, undefined, « accumulator, kValue, k, O »)"
+ HintsVector new_arguments(zone());
+ new_arguments.push_back(Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone()));
+ new_arguments.push_back(Hints()); // accumulator
+ new_arguments.push_back(Hints()); // kValue
+ new_arguments.push_back(Hints()); // k
+ new_arguments.push_back(arguments[0]); // O
+ for (auto constant : callback.constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined);
+ }
}
break;
+ // TODO(neis): At least for Array* we should look at blueprints too.
+ // TODO(neis): Might need something like a FunctionBlueprint but for
+ // creating bound functions rather than creating closures.
case Builtins::kFunctionPrototypeApply:
- case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ // Drop hints for all arguments except the user-given receiver.
+ Hints new_receiver =
+ arguments.size() >= 2
+ ? arguments[1]
+ : Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(),
+ zone());
+ HintsVector new_arguments({new_receiver}, zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUnknown);
+ }
+ }
+ break;
case Builtins::kPromiseConstructor:
- // TODO(mslekova): Since the reducer for all these introduce a
- // JSCall/JSConstruct that will again get optimized by the JSCallReducer,
- // we basically might have to do all the serialization that we do for that
- // here as well. The only difference is that the new JSCall/JSConstruct
- // has speculation disabled, causing the JSCallReducer to do much less
- // work. To account for that, ProcessCallOrConstruct should have a way of
- // taking the speculation mode as an argument rather than getting that
- // from the feedback. (Also applies to Reflect.apply and
- // Reflect.construct.)
if (arguments.size() >= 1) {
- ProcessHintsForFunctionCall(arguments[0]);
+ // "Call(executor, undefined, « resolvingFunctions.[[Resolve]],
+ // resolvingFunctions.[[Reject]] »)"
+ HintsVector new_arguments(
+ {Hints::SingleConstant(
+ broker()->isolate()->factory()->undefined_value(), zone())},
+ zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(constant, base::nullopt,
+ new_arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUnknown);
+ }
+ }
+ break;
+ case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ HintsVector new_arguments(arguments.begin() + 1, arguments.end(),
+ zone());
+ for (auto constant : arguments[0].constants()) {
+ ProcessCalleeForCallOrConstruct(
+ constant, base::nullopt, new_arguments,
+ SpeculationMode::kDisallowSpeculation, padding);
+ }
}
break;
case Builtins::kReflectApply:
case Builtins::kReflectConstruct:
if (arguments.size() >= 2) {
- ProcessHintsForFunctionCall(arguments[1]);
+ for (auto constant : arguments[1].constants()) {
+ if (constant->IsJSFunction()) {
+ JSFunctionRef(broker(), constant).Serialize();
+ }
+ }
}
break;
case Builtins::kObjectPrototypeIsPrototypeOf:
@@ -2181,13 +2389,6 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
}
}
-void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
- Hints const& target_hints) {
- for (auto constant : target_hints.constants()) {
- if (constant->IsJSFunction()) JSFunctionRef(broker(), constant).Serialize();
- }
-}
-
namespace {
void ProcessMapForFunctionBind(MapRef map) {
map.SerializePrototype();
@@ -2195,8 +2396,9 @@ void ProcessMapForFunctionBind(MapRef map) {
JSFunction::kNameDescriptorIndex) +
1;
if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
- map.SerializeOwnDescriptor(JSFunction::kLengthDescriptorIndex);
- map.SerializeOwnDescriptor(JSFunction::kNameDescriptorIndex);
+ map.SerializeOwnDescriptor(
+ InternalIndex(JSFunction::kLengthDescriptorIndex));
+ map.SerializeOwnDescriptor(InternalIndex(JSFunction::kNameDescriptorIndex));
}
}
} // namespace
@@ -2261,7 +2463,8 @@ void SerializerForBackgroundCompilation::ProcessJump(
void SerializerForBackgroundCompilation::VisitReturn(
BytecodeArrayIterator* iterator) {
- environment()->return_value_hints().Add(environment()->accumulator_hints());
+ environment()->return_value_hints().Add(environment()->accumulator_hints(),
+ zone());
environment()->ClearEphemeralHints();
}
@@ -2301,7 +2504,8 @@ void SerializerForBackgroundCompilation::VisitConstruct(
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
- ProcessCallOrConstruct(callee, new_target, arguments, slot);
+ ProcessCallOrConstruct(callee, new_target, arguments, slot,
+ kMissingArgumentsAreUndefined);
}
void SerializerForBackgroundCompilation::VisitConstructWithSpread(
@@ -2315,8 +2519,10 @@ void SerializerForBackgroundCompilation::VisitConstructWithSpread(
HintsVector arguments(zone());
environment()->ExportRegisterHints(first_reg, reg_count, &arguments);
-
- ProcessCallOrConstruct(callee, new_target, arguments, slot, true);
+ DCHECK(!arguments.empty());
+ arguments.pop_back(); // Remove the spread element.
+ ProcessCallOrConstruct(callee, new_target, arguments, slot,
+ kMissingArgumentsAreUnknown);
}
void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
@@ -2333,7 +2539,7 @@ void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
base::Optional<ObjectRef> value =
feedback.AsGlobalAccess().GetConstantHint();
if (value.has_value()) {
- environment()->accumulator_hints().AddConstant(value->object());
+ environment()->accumulator_hints().AddConstant(value->object(), zone());
}
} else {
DCHECK(feedback.IsInsufficient());
@@ -2480,9 +2686,16 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
receiver_map.SerializeRootMap();
// For JSNativeContextSpecialization::ReduceNamedAccess.
- if (receiver_map.IsMapOfTargetGlobalProxy()) {
- broker()->target_native_context().global_proxy_object().GetPropertyCell(
+ JSGlobalProxyRef global_proxy =
+ broker()->target_native_context().global_proxy_object();
+ JSGlobalObjectRef global_object =
+ broker()->target_native_context().global_object();
+ if (receiver_map.equals(global_proxy.map())) {
+ base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
+ if (access_mode == AccessMode::kLoad && cell.has_value()) {
+ new_accumulator_hints->AddConstant(cell->value().object(), zone());
+ }
}
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
@@ -2515,6 +2728,10 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
FunctionTemplateInfoRef fti(broker(), access_info.constant());
if (fti.has_call_code()) fti.SerializeCallCode();
}
+ } else if (access_info.IsModuleExport()) {
+ // For JSNativeContextSpecialization::BuildPropertyLoad
+ DCHECK(!access_info.constant().is_null());
+ CellRef(broker(), access_info.constant());
}
// For PropertyAccessBuilder::TryBuildLoadConstantDataField
@@ -2535,7 +2752,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
access_info.field_representation(), access_info.field_index(),
SerializationPolicy::kSerializeIfNeeded));
if (constant.has_value()) {
- new_accumulator_hints->AddConstant(constant->object());
+ new_accumulator_hints->AddConstant(constant->object(), zone());
}
}
}
@@ -2565,7 +2782,7 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
return;
}
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
switch (feedback.kind()) {
case ProcessedFeedback::kElementAccess:
ProcessElementAccess(receiver, key, feedback.AsElementAccess(),
@@ -2583,14 +2800,14 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
if (access_mode == AccessMode::kLoad) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
} else {
DCHECK(new_accumulator_hints.IsEmpty());
}
}
void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
- Hints receiver, NameRef const& name, FeedbackSlot slot,
+ Hints const& receiver, NameRef const& name, FeedbackSlot slot,
AccessMode access_mode) {
if (slot.IsInvalid() || feedback_vector().is_null()) return;
FeedbackSource source(feedback_vector(), slot);
@@ -2598,12 +2815,13 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
if (BailoutOnUninitialized(feedback)) return;
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
switch (feedback.kind()) {
case ProcessedFeedback::kNamedAccess:
DCHECK(name.equals(feedback.AsNamedAccess().name()));
ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode,
&new_accumulator_hints);
+ // TODO(neis): Propagate feedback maps to receiver hints.
break;
case ProcessedFeedback::kInsufficient:
break;
@@ -2613,7 +2831,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
if (access_mode == AccessMode::kLoad) {
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
} else {
DCHECK(new_accumulator_hints.IsEmpty());
}
@@ -2622,7 +2840,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
void SerializerForBackgroundCompilation::ProcessNamedAccess(
Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode,
Hints* new_accumulator_hints) {
- for (Handle<Map> map : feedback.AsNamedAccess().maps()) {
+ for (Handle<Map> map : feedback.maps()) {
MapRef map_ref(broker(), map);
ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode,
base::nullopt, new_accumulator_hints);
@@ -2635,8 +2853,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
base::nullopt, new_accumulator_hints);
}
- JSGlobalProxyRef global_proxy =
- broker()->target_native_context().global_proxy_object();
for (Handle<Object> hint : receiver.constants()) {
ObjectRef object(broker(), hint);
if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
@@ -2645,13 +2861,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
object.AsJSObject(),
new_accumulator_hints);
}
- // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus.
- if (object.equals(global_proxy)) {
- // TODO(neis): Record accumulator hint? Also for string.length and maybe
- // more.
- global_proxy.GetPropertyCell(feedback.name(),
- SerializationPolicy::kSerializeIfNeeded);
- }
// For JSNativeContextSpecialization::ReduceJSLoadNamed.
if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
feedback.name().equals(ObjectRef(
@@ -2659,9 +2868,12 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
JSFunctionRef function = object.AsJSFunction();
function.Serialize();
if (new_accumulator_hints != nullptr && function.has_prototype()) {
- new_accumulator_hints->AddConstant(function.prototype().object());
+ new_accumulator_hints->AddConstant(function.prototype().object(),
+ zone());
}
}
+ // TODO(neis): Also record accumulator hint for string.length and maybe
+ // more?
}
}
@@ -2841,7 +3053,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
environment()->register_hints(iterator->GetRegisterOperand(0));
Hints rhs = environment()->accumulator_hints();
FeedbackSlot slot = iterator->GetSlotOperand(1);
- Hints new_accumulator_hints(zone());
+ Hints new_accumulator_hints;
if (slot.IsInvalid() || feedback_vector().is_null()) return;
FeedbackSource source(feedback_vector(), slot);
@@ -2853,7 +3065,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf();
if (rhs_feedback.value().has_value()) {
Handle<JSObject> constructor = rhs_feedback.value()->object();
- rhs.AddConstant(constructor);
+ rhs.AddConstant(constructor, zone());
}
}
@@ -2865,7 +3077,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf(
if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs);
environment()->accumulator_hints().Clear();
- environment()->accumulator_hints().Add(new_accumulator_hints);
+ environment()->accumulator_hints().Add(new_accumulator_hints, zone());
}
void SerializerForBackgroundCompilation::VisitToNumeric(
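
The serializer changes above remove the zone stored inside Hints: hint containers are now default-constructible, every Add takes the target zone explicitly, and AddFromChildSerializer deep-copies hints out of the child serializer's zone before that zone is destroyed. A minimal, self-contained sketch of that ownership pattern, using hypothetical Arena/HintSet stand-ins rather than V8's actual Zone and Hints classes:

#include <cassert>
#include <memory>
#include <vector>

// Hypothetical stand-in for a V8 Zone: a bump arena whose allocations all
// disappear when the arena itself is destroyed.
class Arena {
 public:
  int* NewInt(int value) {
    ints_.push_back(std::make_unique<int>(value));
    return ints_.back().get();
  }

 private:
  std::vector<std::unique_ptr<int>> ints_;
};

// Like the patched Hints: default-constructible, no remembered zone; every
// mutation takes the arena to allocate in explicitly.
struct HintSet {
  std::vector<int*> items;  // pointers into some arena

  void Add(int value, Arena* arena) { items.push_back(arena->NewInt(value)); }

  // Deep-copies data out of a child arena that is about to die, the same
  // idea as Hints::AddFromChildSerializer in the diff above.
  void AddFromChild(const HintSet& other, Arena* arena) {
    for (const int* p : other.items) Add(*p, arena);
  }
};

int main() {
  Arena parent_zone;
  HintSet result;
  {
    Arena child_zone;  // stands in for the child serializer's zone
    HintSet child_hints;
    child_hints.Add(42, &child_zone);
    result.AddFromChild(child_hints, &parent_zone);  // copy before it dies
  }  // child_zone destroyed here, as after RunChildSerializer returns
  assert(*result.items[0] == 42);
  return 0;
}
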
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 881ed61a55..8f7883eeba 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -17,6 +17,7 @@ namespace compiler {
class CompilationDependencies;
class JSHeapBroker;
+class ZoneStats;
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
@@ -27,9 +28,9 @@ using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
void RunSerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
- BailoutId osr_offset);
+ ZoneStats* zone_stats, JSHeapBroker* broker,
+ CompilationDependencies* dependencies, Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 783f3bcc11..2781cc248f 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -132,6 +132,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
@@ -210,6 +211,7 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16LeS) \
V(I8x16LtU) \
V(I8x16LeU) \
+ V(S8x16Swizzle) \
V(S8x16Shuffle)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
@@ -940,6 +942,28 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kSimd128ReverseBytes: {
+ DCHECK_EQ(1, node->InputCount());
+ bool is_float = ReplacementType(node->InputAt(0)) == SimdType::kFloat32x4;
+ replacements_[node->id()].type =
+ is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4;
+ Node** rep = GetReplacementsWithType(
+ node->InputAt(0),
+ is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4);
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
+ Node* temp = is_float ? graph()->NewNode(
+ machine()->BitcastFloat32ToInt32(), rep[i])
+ : rep[i];
+ temp = graph()->NewNode(machine()->Word32ReverseBytes(), temp);
+ rep_node[kNumLanes32 - 1 - i] =
+ is_float
+ ? graph()->NewNode(machine()->BitcastInt32ToFloat32(), temp)
+ : temp;
+ }
+ ReplaceNode(node, rep_node, kNumLanes32);
+ break;
+ }
case IrOpcode::kLoad:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kProtectedLoad: {
@@ -1219,6 +1243,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
+ F32X4_UNOP_CASE(Sqrt)
#undef F32X4_UNOP_CASE
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
@@ -1368,6 +1393,45 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kS8x16Swizzle: {
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** indices = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ Node* stack_slot = graph()->NewNode(
+ machine()->StackSlot(MachineRepresentation::kSimd128));
+
+ // Push all num_lanes values into stack slot.
+ const Operator* store_op = machine()->Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
+ Node* effect_input = graph()->start();
+ for (int i = num_lanes - 1; i >= 0; i--) {
+ // We want all the stores to happen first before any of the loads
+ // below, so connect them via effect edge from i-1 to i.
+ Node* store =
+ graph()->NewNode(store_op, stack_slot, mcgraph_->Int32Constant(i),
+ rep_left[i], effect_input, graph()->start());
+ effect_input = store;
+ }
+
+ for (int i = num_lanes - 1; i >= 0; i--) {
+ // Only select lane when index is < num_lanes, otherwise write 0 to
+ // lane. Use Uint32 to take care of negative indices.
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Uint32LessThan(), indices[i],
+ mcgraph_->Int32Constant(num_lanes)));
+
+ Node* load =
+ graph()->NewNode(machine()->Load(LoadRepresentation::Uint8()),
+ stack_slot, indices[i], effect_input, d.if_true);
+
+ rep_nodes[i] = d.Phi(MachineRepresentation::kWord8, load,
+ mcgraph_->Int32Constant(0));
+ }
+
+ ReplaceNode(node, rep_nodes, num_lanes);
+ break;
+ }
case IrOpcode::kS8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
const uint8_t* shuffle = S8x16ShuffleOf(node->op());
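
The kS8x16Swizzle lowering above spells the vector operation out lane by lane: each result byte loads input[indices[i]] from the stack slot when the index, compared as an unsigned 32-bit value, is below the lane count, and becomes 0 otherwise. A plain scalar model of that semantics (not V8 code) for the 16-lane case:

#include <array>
#include <cassert>
#include <cstdint>

// Scalar model of the S8x16Swizzle lowering: lane i of the result is
// input[indices[i]] when the index is in range, else 0. The unsigned
// comparison makes any negative index fall out of range, mirroring the
// Uint32LessThan guard in the Diamond above.
std::array<uint8_t, 16> Swizzle(const std::array<uint8_t, 16>& input,
                                const std::array<int8_t, 16>& indices) {
  std::array<uint8_t, 16> result{};
  for (int i = 0; i < 16; ++i) {
    uint32_t idx = static_cast<uint32_t>(static_cast<int32_t>(indices[i]));
    result[i] = idx < 16 ? input[idx] : 0;
  }
  return result;
}

int main() {
  std::array<uint8_t, 16> in{};
  for (int i = 0; i < 16; ++i) in[i] = static_cast<uint8_t>(100 + i);
  std::array<int8_t, 16> idx = {15, 0, 1, 2, -1, 3, 4, 5,
                                6, 7, 8, 9, 10, 11, 12, 13};
  std::array<uint8_t, 16> out = Swizzle(in, idx);
  assert(out[0] == 115);  // in[15]
  assert(out[4] == 0);    // index -1 is out of range
  return 0;
}
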
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1ca7bfe707..fadc9bf6d9 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1197,7 +1197,7 @@ class RepresentationSelector {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(input).Is(Type::BigInt())) {
- ProcessInput(node, i, UseInfo::AnyTagged());
+ ConvertInput(node, i, UseInfo::AnyTagged());
}
(*types)[i] =
@@ -1220,11 +1220,22 @@ class RepresentationSelector {
// Accumulator is a special flower - we need to remember its type in
// a singleton typed-state-values node (as if it was a singleton
// state-values node).
+ Node* accumulator = node->InputAt(2);
if (propagate()) {
- EnqueueInput(node, 2, UseInfo::Any());
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(accumulator).Is(Type::BigInt())) {
+ EnqueueInput(node, 2, UseInfo::AnyTagged());
+ } else {
+ EnqueueInput(node, 2, UseInfo::Any());
+ }
} else if (lower()) {
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(accumulator).Is(Type::BigInt())) {
+ ConvertInput(node, 2, UseInfo::AnyTagged());
+ }
Zone* zone = jsgraph_->zone();
- Node* accumulator = node->InputAt(2);
if (accumulator == jsgraph_->OptimizedOutConstant()) {
node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
} else {
@@ -1237,7 +1248,7 @@ class RepresentationSelector {
node->ReplaceInput(
2, jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
types, SparseInputMask::Dense()),
- accumulator));
+ node->InputAt(2)));
}
}
@@ -2667,7 +2678,11 @@ class RepresentationSelector {
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ if (COMPRESS_POINTERS_BOOL) {
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ } else {
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ }
}
return;
}
@@ -2894,6 +2909,18 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadMessage: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
+ case IrOpcode::kStoreMessage: {
+ ProcessInput(node, 0, UseInfo::Word());
+ ProcessInput(node, 1, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 2);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kLoadFieldByIndex: {
if (truncation.IsUnused()) return VisitUnused(node);
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2945,6 +2972,11 @@ class RepresentationSelector {
access.machine_type.representation());
return;
}
+ case IrOpcode::kLoadStackArgument: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
Node* value_node = node->InputAt(2);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 885a86286e..0f293d2b38 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -155,23 +155,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
Node* new_node = graph()->NewNode(
simplified()->ChangeInt31ToCompressedSigned(), m.InputAt(0));
return Replace(new_node);
- } else if (m.IsCheckedInt32ToTaggedSigned()) {
- // Create a new checked node that outputs CompressedSigned values, with
- // an explicit decompression after it.
- Node* new_checked = graph()->CloneNode(m.node());
- NodeProperties::ChangeOp(
- new_checked, simplified()->CheckedInt32ToCompressedSigned(
- CheckParametersOf(m.node()->op()).feedback()));
- Node* new_decompression = graph()->NewNode(
- machine()->ChangeCompressedSignedToTaggedSigned(), new_checked);
-
- // For all uses of the old checked node, instead insert the new "checked
- // + decompression". Also, update control and effect.
- ReplaceWithValue(m.node(), new_decompression, new_checked, new_checked);
-
- // In the current node, we can skip the decompression since we are going
- // to have a Decompression + Compression combo.
- return Replace(new_checked);
}
break;
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 6b86a95e01..63d24274ec 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -1149,6 +1149,17 @@ struct SimplifiedOperatorGlobalCache final {
};
LoadFieldByIndexOperator kLoadFieldByIndex;
+ struct LoadStackArgumentOperator final : public Operator {
+ LoadStackArgumentOperator()
+ : Operator( // --
+ IrOpcode::kLoadStackArgument, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow |
+ Operator::kNoWrite, // flags
+ "LoadStackArgument", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
+ };
+ LoadStackArgumentOperator kLoadStackArgument;
+
#define SPECULATIVE_NUMBER_BINOP(Name) \
template <NumberOperationHint kHint> \
struct Name##Operator final : public Operator1<NumberOperationHint> { \
@@ -1754,6 +1765,24 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::LoadMessage() {
+ return new (zone())
+ Operator(IrOpcode::kLoadMessage,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ "LoadMessage", 1, 1, 1, 1, 1, 0);
+}
+
+const Operator* SimplifiedOperatorBuilder::StoreMessage() {
+ return new (zone())
+ Operator(IrOpcode::kStoreMessage,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead,
+ "StoreMessage", 2, 1, 1, 0, 1, 0);
+}
+
+const Operator* SimplifiedOperatorBuilder::LoadStackArgument() {
+ return &cache_.kLoadStackArgument;
+}
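The integer lists at the end of these Operator constructors are the usual V8 operator counts; assuming the standard ordering (value inputs, effect inputs, control inputs, value outputs, effect outputs, control outputs), the three new operators read roughly as follows, using the input types from the simplified-lowering hunk above:

// Assumed count ordering: value_in, effect_in, control_in,
//                         value_out, effect_out, control_out.
//
// LoadMessage        1, 1, 1, 1, 1, 0  -> one word-typed value input,
//                                         produces a tagged value plus an
//                                         effect.
// StoreMessage       2, 1, 1, 0, 1, 0  -> a word-typed location and a tagged
//                                         value as inputs, produces only an
//                                         effect.
// LoadStackArgument  2, 1, 1, 1, 1, 0  -> [base + index] inputs (see the
//                                         header comment), produces a tagged
//                                         value plus an effect.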
+
const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
Handle<Map> double_map, Handle<Map> fast_map) {
TransitionAndStoreElementParameters parameters(double_map, fast_map);
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 58e9bfdffb..a1438cdce0 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -876,6 +876,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
Type type, AllocationType allocation = AllocationType::kYoung,
AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse);
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
@@ -883,6 +886,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
+ // load-stack-argument [base + index]
+ const Operator* LoadStackArgument();
+
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index bd53fb895f..08accd61c5 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <iterator>
-
#include "src/compiler/store-store-elimination.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/persistent-map.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -41,7 +43,199 @@ namespace compiler {
#define DCHECK_EXTRA(condition, fmt, ...) ((void)0)
#endif
-void StoreStoreElimination::RedundantStoreFinder::Find() {
+namespace {
+
+using StoreOffset = uint32_t;
+
+struct UnobservableStore {
+ NodeId id_;
+ StoreOffset offset_;
+
+ bool operator==(const UnobservableStore other) const {
+ return (id_ == other.id_) && (offset_ == other.offset_);
+ }
+
+ bool operator<(const UnobservableStore other) const {
+ return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
+ }
+};
+
+size_t hash_value(const UnobservableStore& p) {
+ return base::hash_combine(p.id_, p.offset_);
+}
+
+// Instances of UnobservablesSet are immutable. They represent either a set of
+// UnobservableStores, or the "unvisited empty set".
+//
+// We apply some sharing to save memory. The class UnobservablesSet is only a
+// pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
+// changes to an UnobservablesSet might allocate in the temp_zone.
+//
+// The size of an instance should be the size of a pointer, plus additional
+// space in the zone in the case of non-unvisited UnobservablesSets. Copying
+// an UnobservablesSet allocates no memory.
+class UnobservablesSet final {
+ private:
+ using KeyT = UnobservableStore;
+ using ValueT = bool; // Emulates set semantics in the map.
+
+ // The PersistentMap uses a special value to signify 'not present'. We use
+ // a boolean value to emulate set semantics.
+ static constexpr ValueT kNotPresent = false;
+ static constexpr ValueT kPresent = true;
+
+ public:
+ using SetT = PersistentMap<KeyT, ValueT>;
+
+ // Creates a new UnobservablesSet, with the null set.
+ static UnobservablesSet Unvisited() { return UnobservablesSet(); }
+
+ // Create a new empty UnobservablesSet. This allocates in the zone, and
+ // can probably be optimized to use a global singleton.
+ static UnobservablesSet VisitedEmpty(Zone* zone);
+ UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
+
+ // Computes the intersection of two UnobservablesSets. If one of the sets is
+ // empty, will return empty.
+ UnobservablesSet Intersect(const UnobservablesSet& other,
+ const UnobservablesSet& empty, Zone* zone) const;
+
+  // Returns a set that is the current one plus the observation {obs} passed
+  // as a parameter. If {obs} is already in the set, no new set is created.
+ UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
+
+  // Returns a set that is the current one minus all observations with offset
+  // {off}. This is done by copying the set and removing every observation
+  // whose offset matches.
+  // This could probably be done better if the observations were keyed first
+  // by offset and then by node.
+  // We remove all nodes with offset {off} because different nodes may alias
+  // one another, and we currently have no way of knowing whether two nodes
+  // are definitely the same value.
+ UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+
+ const SetT* set() const { return set_; }
+
+ bool IsUnvisited() const { return set_ == nullptr; }
+ bool IsEmpty() const {
+ return set_ == nullptr || set_->begin() == set_->end();
+ }
+ bool Contains(UnobservableStore obs) const {
+ return set_ != nullptr && set_->Get(obs) != kNotPresent;
+ }
+
+ bool operator==(const UnobservablesSet& other) const {
+ if (IsUnvisited() || other.IsUnvisited()) {
+ return IsEmpty() && other.IsEmpty();
+ } else {
+ // Both pointers guaranteed not to be nullptrs.
+ return *set() == *(other.set());
+ }
+ }
+
+ bool operator!=(const UnobservablesSet& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ UnobservablesSet() = default;
+ explicit UnobservablesSet(const SetT* set) : set_(set) {}
+
+ static SetT* NewSet(Zone* zone) {
+ return new (zone->New(sizeof(UnobservablesSet::SetT)))
+ UnobservablesSet::SetT(zone, kNotPresent);
+ }
+
+ static void SetAdd(SetT* set, const KeyT& key) { set->Set(key, kPresent); }
+ static void SetErase(SetT* set, const KeyT& key) {
+ set->Set(key, kNotPresent);
+ }
+
+ const SetT* set_ = nullptr;
+};
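The set semantics here are emulated on top of PersistentMap by storing kPresent/kNotPresent as values, with kNotPresent doubling as the map's default for absent keys. A minimal usage sketch, using only the PersistentMap calls that appear in this file (zone is assumed to be a live Zone*):

using StoreSet = PersistentMap<UnobservableStore, bool>;

StoreSet* set = new (zone->New(sizeof(StoreSet))) StoreSet(zone, false);
UnobservableStore obs{/*id_=*/42, /*offset_=*/8};

set->Set(obs, true);            // insert
bool contains = set->Get(obs);  // membership test; false if absent
set->Set(obs, false);           // erase: reset to the "not present" default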
+
+class RedundantStoreFinder final {
+ public:
+  // Note that we initialize unobservable_ with js_graph->graph()->NodeCount()
+  // empty sets.
+ RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone)
+ : jsgraph_(js_graph),
+ tick_counter_(tick_counter),
+ temp_zone_(temp_zone),
+ revisit_(temp_zone),
+ in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+ unobservable_(js_graph->graph()->NodeCount(),
+ UnobservablesSet::Unvisited(), temp_zone),
+ to_remove_(temp_zone),
+ unobservables_visited_empty_(
+ UnobservablesSet::VisitedEmpty(temp_zone)) {}
+
+ // Crawls from the end of the graph to the beginning, with the objective of
+ // finding redundant stores.
+ void Find();
+
+  // Provides const access to the final list of redundant stores that are to
+  // be removed from the graph.
+ const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
+
+ private:
+ // Assumption: All effectful nodes are reachable from End via a sequence of
+ // control, then a sequence of effect edges.
+ // Visit goes through the control chain, visiting effectful nodes that it
+ // encounters.
+ void Visit(Node* node);
+
+ // Marks effect inputs for visiting, if we are able to update this path of
+ // the graph.
+ void VisitEffectfulNode(Node* node);
+
+ // Compute the intersection of the UnobservablesSets of all effect uses and
+ // return it.
+ // The result UnobservablesSet will never be null.
+ UnobservablesSet RecomputeUseIntersection(Node* node);
+
+  // Recomputes the unobservables-set for a node. Also marks superfluous nodes
+  // for removal.
+ UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
+
+ // Returns true if node's opcode cannot observe StoreFields.
+ static bool CannotObserveStoreField(Node* node);
+
+ void MarkForRevisit(Node* node);
+ bool HasBeenVisited(Node* node);
+
+ // To safely cast an offset from a FieldAccess, which has a potentially
+ // wider range (namely int).
+ StoreOffset ToOffset(const FieldAccess& access) {
+ DCHECK_GE(access.offset, 0);
+ return static_cast<StoreOffset>(access.offset);
+ }
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() { return jsgraph()->isolate(); }
+ Zone* temp_zone() const { return temp_zone_; }
+ UnobservablesSet& unobservable_for_id(NodeId id) {
+ DCHECK_LT(id, unobservable_.size());
+ return unobservable_[id];
+ }
+ ZoneSet<Node*>& to_remove() { return to_remove_; }
+
+ JSGraph* const jsgraph_;
+ TickCounter* const tick_counter_;
+ Zone* const temp_zone_;
+
+ ZoneStack<Node*> revisit_;
+ ZoneVector<bool> in_revisit_;
+
+  // Maps node IDs to UnobservablesSets.
+ ZoneVector<UnobservablesSet> unobservable_;
+ ZoneSet<Node*> to_remove_;
+ const UnobservablesSet unobservables_visited_empty_;
+};
+
+void RedundantStoreFinder::Find() {
Visit(jsgraph()->graph()->end());
while (!revisit_.empty()) {
@@ -65,7 +259,7 @@ void StoreStoreElimination::RedundantStoreFinder::Find() {
#endif
}
-void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) {
+void RedundantStoreFinder::MarkForRevisit(Node* node) {
DCHECK_LT(node->id(), in_revisit_.size());
if (!in_revisit_[node->id()]) {
revisit_.push(node);
@@ -73,32 +267,12 @@ void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) {
}
}
-bool StoreStoreElimination::RedundantStoreFinder::HasBeenVisited(Node* node) {
+bool RedundantStoreFinder::HasBeenVisited(Node* node) {
return !unobservable_for_id(node->id()).IsUnvisited();
}
-void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
- Zone* temp_zone) {
- // Find superfluous nodes
- RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
- finder.Find();
-
- // Remove superfluous nodes
- for (Node* node : finder.to_remove_const()) {
- if (FLAG_trace_store_elimination) {
- PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
- node->id(), node->op()->mnemonic());
- }
- Node* previous_effect = NodeProperties::GetEffectInput(node);
- NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
- nullptr);
- node->Kill();
- }
-}
-
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::RedundantStoreFinder::RecomputeSet(
- Node* node, const StoreStoreElimination::UnobservablesSet& uses) {
+UnobservablesSet RedundantStoreFinder::RecomputeSet(
+ Node* node, const UnobservablesSet& uses) {
switch (node->op()->opcode()) {
case IrOpcode::kStoreField: {
Node* stored_to = node->InputAt(0);
@@ -150,8 +324,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeSet(
UNREACHABLE();
}
-bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField(
- Node* node) {
+bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad ||
opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi ||
@@ -159,7 +332,7 @@ bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField(
opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain;
}
-void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) {
+void RedundantStoreFinder::Visit(Node* node) {
if (!HasBeenVisited(node)) {
for (int i = 0; i < node->op()->ControlInputCount(); i++) {
Node* control_input = NodeProperties::GetControlInput(node, i);
@@ -180,19 +353,15 @@ void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) {
}
}
-void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode(
- Node* node) {
+void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
if (HasBeenVisited(node)) {
TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic());
}
- StoreStoreElimination::UnobservablesSet after_set =
- RecomputeUseIntersection(node);
- StoreStoreElimination::UnobservablesSet before_set =
- RecomputeSet(node, after_set);
+ UnobservablesSet after_set = RecomputeUseIntersection(node);
+ UnobservablesSet before_set = RecomputeSet(node, after_set);
DCHECK(!before_set.IsUnvisited());
- StoreStoreElimination::UnobservablesSet stores_for_node =
- unobservable_for_id(node->id());
+ UnobservablesSet stores_for_node = unobservable_for_id(node->id());
bool cur_set_changed =
stores_for_node.IsUnvisited() || stores_for_node != before_set;
if (!cur_set_changed) {
@@ -212,9 +381,7 @@ void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode(
}
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
- Node* node) {
+UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
// There were no effect uses. Break early.
if (node->op()->EffectOutputCount() == 0) {
IrOpcode::Value opcode = node->opcode();
@@ -236,8 +403,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
// {first} == false indicates that cur_set is the intersection of at least one
// thing.
bool first = true;
- StoreStoreElimination::UnobservablesSet cur_set =
- StoreStoreElimination::UnobservablesSet::Unvisited(); // irrelevant
+ UnobservablesSet cur_set = UnobservablesSet::Unvisited(); // irrelevant
for (Edge edge : node->use_edges()) {
if (!NodeProperties::IsEffectEdge(edge)) {
continue;
@@ -245,8 +411,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
// Intersect with the new use node.
Node* use = edge.from();
- StoreStoreElimination::UnobservablesSet new_set =
- unobservable_for_id(use->id());
+ UnobservablesSet new_set = unobservable_for_id(use->id());
if (first) {
first = false;
cur_set = new_set;
@@ -268,72 +433,70 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection(
return cur_set;
}
-StoreStoreElimination::UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
-
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::VisitedEmpty(Zone* zone) {
- ZoneSet<UnobservableStore>* empty_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- return StoreStoreElimination::UnobservablesSet(empty_set);
+UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
+ return UnobservablesSet(NewSet(zone));
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::Intersect(
- const StoreStoreElimination::UnobservablesSet& other,
- const StoreStoreElimination::UnobservablesSet& empty, Zone* zone) const {
- if (IsEmpty() || other.IsEmpty()) {
- return empty;
- } else {
- ZoneSet<UnobservableStore>* intersection =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Put the intersection of set() and other.set() in intersection.
- set_intersection(set()->begin(), set()->end(), other.set()->begin(),
- other.set()->end(),
- std::inserter(*intersection, intersection->end()));
-
- return StoreStoreElimination::UnobservablesSet(intersection);
+UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other,
+ const UnobservablesSet& empty,
+ Zone* zone) const {
+ if (IsEmpty() || other.IsEmpty()) return empty;
+
+ UnobservablesSet::SetT* intersection = NewSet(zone);
+ for (const auto& triple : set()->Zip(*other.set())) {
+ if (std::get<1>(triple) && std::get<2>(triple)) {
+ intersection->Set(std::get<0>(triple), kPresent);
+ }
}
+
+ return UnobservablesSet(intersection);
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::Add(UnobservableStore obs,
- Zone* zone) const {
- bool found = set()->find(obs) != set()->end();
- if (found) {
- return *this;
- } else {
- // Make a new empty set.
- ZoneSet<UnobservableStore>* new_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Copy the old elements over.
- *new_set = *set();
- // Add the new element.
- bool inserted = new_set->insert(obs).second;
- DCHECK(inserted);
- USE(inserted); // silence warning about unused variable
-
- return StoreStoreElimination::UnobservablesSet(new_set);
+UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
+ Zone* zone) const {
+ if (set()->Get(obs) != kNotPresent) return *this;
+
+ UnobservablesSet::SetT* new_set = NewSet(zone);
+ *new_set = *set();
+ SetAdd(new_set, obs);
+
+ return UnobservablesSet(new_set);
+}
+
+UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
+ Zone* zone) const {
+ UnobservablesSet::SetT* new_set = NewSet(zone);
+ *new_set = *set();
+
+ // Remove elements with the given offset.
+ for (const auto& entry : *new_set) {
+ const UnobservableStore& obs = entry.first;
+ if (obs.offset_ == offset) SetErase(new_set, obs);
}
+
+ return UnobservablesSet(new_set);
}
-StoreStoreElimination::UnobservablesSet
-StoreStoreElimination::UnobservablesSet::RemoveSameOffset(StoreOffset offset,
- Zone* zone) const {
- // Make a new empty set.
- ZoneSet<UnobservableStore>* new_set =
- new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
- ZoneSet<UnobservableStore>(zone);
- // Copy all elements over that have a different offset.
- for (auto obs : *set()) {
- if (obs.offset_ != offset) {
- new_set->insert(obs);
+} // namespace
+
+// static
+void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone) {
+ // Find superfluous nodes
+ RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
+ finder.Find();
+
+ // Remove superfluous nodes
+ for (Node* node : finder.to_remove_const()) {
+ if (FLAG_trace_store_elimination) {
+ PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
+ node->id(), node->op()->mnemonic());
}
+ Node* previous_effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
+ nullptr);
+ node->Kill();
}
-
- return StoreStoreElimination::UnobservablesSet(new_set);
}
#undef TRACE
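Concretely, the pattern this pass removes is a field store whose value can never be observed because a later store to the same field, with nothing in between that could read it, overwrites it; only the effect chain changes:

// Effect chain before elimination (sketch):
//   e1 = StoreField[+12](obj, x, e0, ctrl)   // unobservable: overwritten below
//   e2 = StoreField[+12](obj, y, e1, ctrl)
//
// StoreStoreElimination::Run kills the first store and rewires its effect
// uses to its effect input:
//   e2 = StoreField[+12](obj, y, e0, ctrl)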
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index 7704938fc0..0813adb1f0 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -5,18 +5,18 @@
#ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_
#define V8_COMPILER_STORE_STORE_ELIMINATION_H_
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/zone/zone-containers.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
class TickCounter;
+class Zone;
namespace compiler {
+class JSGraph;
+
// Store-store elimination.
//
// The aim of this optimization is to detect the following pattern in the
@@ -44,176 +44,10 @@ namespace compiler {
//
// This implementation needs all dead nodes removed from the graph, and the
// graph should be trimmed.
-class StoreStoreElimination final {
+class StoreStoreElimination final : public AllStatic {
public:
static void Run(JSGraph* js_graph, TickCounter* tick_counter,
Zone* temp_zone);
-
- private:
- using StoreOffset = uint32_t;
-
- struct UnobservableStore {
- NodeId id_;
- StoreOffset offset_;
-
- bool operator==(const UnobservableStore other) const {
- return (id_ == other.id_) && (offset_ == other.offset_);
- }
-
- bool operator<(const UnobservableStore other) const {
- return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
- }
- };
-
- // Instances of UnobservablesSet are immutable. They represent either a set of
- // UnobservableStores, or the "unvisited empty set".
- //
- // We apply some sharing to save memory. The class UnobservablesSet is only a
- // pointer wide, and a copy does not use any heap (or temp_zone) memory. Most
- // changes to an UnobservablesSet might allocate in the temp_zone.
- //
- // The size of an instance should be the size of a pointer, plus additional
- // space in the zone in the case of non-unvisited UnobservablesSets. Copying
- // an UnobservablesSet allocates no memory.
- class UnobservablesSet final {
- public:
- // Creates a new UnobservablesSet, with the null set.
- static UnobservablesSet Unvisited() { return UnobservablesSet(); }
-
- // Create a new empty UnobservablesSet. This allocates in the zone, and
- // can probably be optimized to use a global singleton.
- static UnobservablesSet VisitedEmpty(Zone* zone);
- UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default;
-
- // Computes the intersection of two UnobservablesSets. If one of the sets is
- // empty, will return empty.
- UnobservablesSet Intersect(const UnobservablesSet& other,
- const UnobservablesSet& empty, Zone* zone) const;
-
- // Returns a set that it is the current one, plus the observation obs passed
- // as parameter. If said obs it's already in the set, we don't have to
- // create a new one.
- UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
-
- // Returns a set that it is the current one, except for all of the
- // observations with offset off. This is done by creating a new set and
- // copying all observations with different offsets.
- // This can probably be done better if the observations are stored first by
- // offset and then by node.
- // We are removing all nodes with offset off since different nodes may
- // alias one another, and we currently we don't have the means to know if
- // two nodes are definitely the same value.
- UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
-
- const ZoneSet<UnobservableStore>* set() const { return set_; }
-
- bool IsUnvisited() const { return set_ == nullptr; }
- bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
- bool Contains(UnobservableStore obs) const {
- return set_ != nullptr && (set_->find(obs) != set_->end());
- }
-
- bool operator==(const UnobservablesSet& other) const {
- if (IsUnvisited() || other.IsUnvisited()) {
- return IsEmpty() && other.IsEmpty();
- } else {
- // Both pointers guaranteed not to be nullptrs.
- return *set() == *(other.set());
- }
- }
-
- bool operator!=(const UnobservablesSet& other) const {
- return !(*this == other);
- }
-
- private:
- UnobservablesSet();
- explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
- : set_(set) {}
- const ZoneSet<UnobservableStore>* set_;
- };
-
- class RedundantStoreFinder final {
- public:
- // Note that we Initialize unobservable_ with js_graph->graph->NodeCount()
- // amount of empty sets.
- RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
- Zone* temp_zone)
- : jsgraph_(js_graph),
- tick_counter_(tick_counter),
- temp_zone_(temp_zone),
- revisit_(temp_zone),
- in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
- unobservable_(js_graph->graph()->NodeCount(),
- StoreStoreElimination::UnobservablesSet::Unvisited(),
- temp_zone),
- to_remove_(temp_zone),
- unobservables_visited_empty_(
- StoreStoreElimination::UnobservablesSet::VisitedEmpty(
- temp_zone)) {}
-
- // Crawls from the end of the graph to the beginning, with the objective of
- // finding redundant stores.
- void Find();
-
- // This method is used for const correctness to go through the final list of
- // redundant stores that are replaced on the graph.
- const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
-
- private:
- // Assumption: All effectful nodes are reachable from End via a sequence of
- // control, then a sequence of effect edges.
- // Visit goes through the control chain, visiting effectful nodes that it
- // encounters.
- void Visit(Node* node);
-
- // Marks effect inputs for visiting, if we are able to update this path of
- // the graph.
- void VisitEffectfulNode(Node* node);
-
- // Compute the intersection of the UnobservablesSets of all effect uses and
- // return it.
- // The result UnobservablesSet will never be null.
- UnobservablesSet RecomputeUseIntersection(Node* node);
-
- // Recompute unobservables-set for a node. Will also mark superfluous nodes
- // as to be removed.
- UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses);
-
- // Returns true if node's opcode cannot observe StoreFields.
- static bool CannotObserveStoreField(Node* node);
-
- void MarkForRevisit(Node* node);
- bool HasBeenVisited(Node* node);
-
- // To safely cast an offset from a FieldAccess, which has a potentially
- // wider range (namely int).
- StoreOffset ToOffset(const FieldAccess& access) {
- DCHECK_GE(access.offset, 0);
- return static_cast<StoreOffset>(access.offset);
- }
-
- JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() { return jsgraph()->isolate(); }
- Zone* temp_zone() const { return temp_zone_; }
- UnobservablesSet& unobservable_for_id(NodeId id) {
- DCHECK_LT(id, unobservable_.size());
- return unobservable_[id];
- }
- ZoneSet<Node*>& to_remove() { return to_remove_; }
-
- JSGraph* const jsgraph_;
- TickCounter* const tick_counter_;
- Zone* const temp_zone_;
-
- ZoneStack<Node*> revisit_;
- ZoneVector<bool> in_revisit_;
-
- // Maps node IDs to UnobservableNodeSets.
- ZoneVector<UnobservablesSet> unobservable_;
- ZoneSet<Node*> to_remove_;
- const UnobservablesSet unobservables_visited_empty_;
- };
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 6ba1b39431..e5ee0aa733 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1339,6 +1339,10 @@ Type Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
return Type::OtherObject();
}
+Type Typer::Visitor::TypeJSGetTemplateObject(Node* node) {
+ return Type::Array();
+}
+
Type Typer::Visitor::TypeJSLoadProperty(Node* node) {
return Type::NonInternal();
}
@@ -2192,10 +2196,16 @@ Type Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadMessage(Node* node) { return Type::Any(); }
+
Type Typer::Visitor::TypeLoadElement(Node* node) {
return ElementAccessOf(node->op()).type;
}
+Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
+ return Type::NonInternal();
+}
+
Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
@@ -2222,6 +2232,8 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); }
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 018c54c3d5..caa086bbd3 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -183,7 +183,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case HEAP_NUMBER_TYPE:
return kNumber;
case JS_OBJECT_TYPE:
- case JS_ARGUMENTS_TYPE:
+ case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
@@ -207,16 +207,16 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
- case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- case JS_INTL_COLLATOR_TYPE:
- case JS_INTL_DATE_TIME_FORMAT_TYPE:
- case JS_INTL_LIST_FORMAT_TYPE:
- case JS_INTL_LOCALE_TYPE:
- case JS_INTL_NUMBER_FORMAT_TYPE:
- case JS_INTL_PLURAL_RULES_TYPE:
- case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- case JS_INTL_SEGMENT_ITERATOR_TYPE:
- case JS_INTL_SEGMENTER_TYPE:
+ case JS_V8_BREAK_ITERATOR_TYPE:
+ case JS_COLLATOR_TYPE:
+ case JS_DATE_TIME_FORMAT_TYPE:
+ case JS_LIST_FORMAT_TYPE:
+ case JS_LOCALE_TYPE:
+ case JS_NUMBER_FORMAT_TYPE:
+ case JS_PLURAL_RULES_TYPE:
+ case JS_RELATIVE_TIME_FORMAT_TYPE:
+ case JS_SEGMENT_ITERATOR_TYPE:
+ case JS_SEGMENTER_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -225,8 +225,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
- case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
- case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case JS_REG_EXP_TYPE:
+ case JS_REG_EXP_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
@@ -244,12 +244,12 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
- case WASM_EXCEPTION_TYPE:
- case WASM_GLOBAL_TYPE:
- case WASM_INSTANCE_TYPE:
- case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
- case WASM_TABLE_TYPE:
+ case WASM_EXCEPTION_OBJECT_TYPE:
+ case WASM_GLOBAL_OBJECT_TYPE:
+ case WASM_INSTANCE_OBJECT_TYPE:
+ case WASM_MEMORY_OBJECT_TYPE:
+ case WASM_MODULE_OBJECT_TYPE:
+ case WASM_TABLE_OBJECT_TYPE:
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
@@ -365,7 +365,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
#define MAKE_TORQUE_CLASS_TYPE(V) case V:
- TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+ TORQUE_INTERNAL_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 608d6ffee6..d7fdd4269e 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -732,6 +732,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSGetTemplateObject:
+      // Type is Array.
+ CheckTypeIs(node, Type::Array());
+ break;
case IrOpcode::kJSLoadProperty:
// Type can be anything.
CheckTypeIs(node, Type::Any());
@@ -1594,12 +1598,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadMessage:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
// CheckTypeIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadElement:
+ case IrOpcode::kLoadStackArgument:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1613,6 +1619,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadDataViewElement:
break;
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreMessage:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1700,6 +1707,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64ReverseBits:
case IrOpcode::kWord64ReverseBytes:
+ case IrOpcode::kSimd128ReverseBytes:
case IrOpcode::kInt64AbsWithOverflow:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
@@ -1801,7 +1809,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kBitcastTaggedToWord:
- case IrOpcode::kBitcastTaggedSignedToWord:
+ case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kBitcastWord32ToCompressedSigned:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 28f9943e59..ddc97ce503 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -176,8 +176,6 @@ WasmGraphBuilder::WasmGraphBuilder(
: zone_(zone),
mcgraph_(mcgraph),
env_(env),
- cur_buffer_(def_buffer_),
- cur_bufsize_(kDefaultBufferSize),
has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
@@ -255,24 +253,19 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(mcgraph()->common()->Merge(count), count, controls);
}
-Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
- Node* control) {
- DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Vector<Node*> buf = Realloc(vals, count, count + 1);
- buf[count] = control;
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count,
+ Node** vals_and_control) {
+ DCHECK(IrOpcode::IsMergeOpcode(vals_and_control[count]->opcode()));
return graph()->NewNode(
mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type),
count),
- count + 1, buf.begin());
+ count + 1, vals_and_control);
}
-Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
- Node* control) {
- DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
- Vector<Node*> buf = Realloc(effects, count, count + 1);
- buf[count] = control;
+Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
+ DCHECK(IrOpcode::IsMergeOpcode(effects_and_control[count]->opcode()));
return graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1,
- buf.begin());
+ effects_and_control);
}
Node* WasmGraphBuilder::RefNull() {
@@ -1114,6 +1107,10 @@ Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
+  // The instruction selector will use {kArchTableSwitch} for large switches,
+  // which has a limited input count; see {InstructionSelector::EmitTableSwitch}.
+ DCHECK_LE(count, Instruction::kMaxInputCount - 2); // value_range + 2
+ DCHECK_LE(count, wasm::kV8MaxWasmFunctionBrTableSize + 1); // plus IfDefault
return graph()->NewNode(mcgraph()->common()->Switch(count), key, Control());
}
@@ -1266,27 +1263,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
case 8:
result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
- case 16: {
- Node* byte_reversed_lanes[4];
- for (int lane = 0; lane < 4; lane++) {
- byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes(),
- graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
- value));
- }
-
- // This is making a copy of the value.
- result =
- graph()->NewNode(mcgraph()->machine()->S128And(), value, value);
-
- for (int lane = 0; lane < 4; lane++) {
- result =
- graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane),
- result, byte_reversed_lanes[lane]);
- }
-
+ case 16:
+ result = graph()->NewNode(m->Simd128ReverseBytes(), value);
break;
- }
default:
UNREACHABLE();
break;
@@ -1405,27 +1384,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
case 8:
result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
- case 16: {
- Node* byte_reversed_lanes[4];
- for (int lane = 0; lane < 4; lane++) {
- byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes(),
- graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
- value));
- }
-
- // This is making a copy of the value.
- result =
- graph()->NewNode(mcgraph()->machine()->S128And(), value, value);
-
- for (int lane = 0; lane < 4; lane++) {
- result =
- graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane),
- result, byte_reversed_lanes[lane]);
- }
-
+ case 16:
+ result = graph()->NewNode(m->Simd128ReverseBytes(), value);
break;
- }
default:
UNREACHABLE();
}
@@ -2295,13 +2256,14 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
}
-Vector<Node*> WasmGraphBuilder::GetExceptionValues(
- Node* except_obj, const wasm::WasmException* exception) {
+Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
+ const wasm::WasmException* exception,
+ Vector<Node*> values) {
Node* values_array =
BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
- Vector<Node*> values = Buffer(sig->parameter_count());
+ DCHECK_EQ(sig->parameter_count(), values.size());
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
switch (sig->GetParam(i)) {
@@ -2347,7 +2309,7 @@ Vector<Node*> WasmGraphBuilder::GetExceptionValues(
values[i] = value;
}
DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
- return values;
+ return values_array;
}
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
@@ -2682,7 +2644,8 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
}
-Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args,
+Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig,
+ Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, const Operator* op) {
if (instance_node == nullptr) {
@@ -2695,25 +2658,28 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args,
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
- args = Realloc(args, 1 + params, count).begin();
+ base::SmallVector<Node*, 16 + extra> inputs(count);
+ DCHECK_EQ(1 + params, args.size());
// Make room for the instance_node parameter at index 1, just after code.
- memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = instance_node;
+ inputs[0] = args[0]; // code
+ inputs[1] = instance_node;
+ if (params > 0) memcpy(&inputs[2], &args[1], params * sizeof(Node*));
// Add effect and control inputs.
- args[params + 2] = Effect();
- args[params + 3] = Control();
+ inputs[params + 2] = Effect();
+ inputs[params + 3] = Control();
- Node* call = SetEffect(graph()->NewNode(op, static_cast<int>(count), args));
+ Node* call =
+ SetEffect(graph()->NewNode(op, static_cast<int>(count), inputs.begin()));
DCHECK(position == wasm::kNoCodePosition || position > 0);
if (position > 0) SetSourcePosition(call, position);
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
@@ -2725,21 +2691,22 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
- *rets = Buffer(ret_count).begin();
+ DCHECK_EQ(ret_count, rets.size());
if (ret_count == 1) {
// Only a single return value.
- (*rets)[0] = call;
+ rets[0] = call;
} else {
// Create projections for all return values.
for (size_t i = 0; i < ret_count; i++) {
- (*rets)[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call,
- graph()->start());
+ rets[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call,
+ graph()->start());
}
}
return call;
}
-Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
+Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig,
+ Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
@@ -2753,8 +2720,8 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
return call;
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
int func_index,
IsReturnCall continuation) {
@@ -2779,13 +2746,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
case kCallContinues:
return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
case kReturnCall:
- DCHECK_NULL(rets);
+ DCHECK(rets.empty());
return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
}
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
- Node*** rets,
+Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* func_index,
IsReturnCall continuation) {
@@ -2829,12 +2796,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
case kCallContinues:
return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
case kReturnCall:
- DCHECK_NULL(rets);
+ DCHECK(rets.empty());
return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
}
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Vector<Node*> args,
+ Vector<Node*> rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
@@ -2853,7 +2821,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
- Node** args, Node*** rets,
+ Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position) {
return BuildIndirectCall(table_index, sig_index, args, rets, position,
kCallContinues);
@@ -2902,8 +2870,9 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
}
Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index, Node** args,
- Node*** rets,
+ uint32_t sig_index,
+ Vector<Node*> args,
+ Vector<Node*> rets,
wasm::WasmCodePosition position,
IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
@@ -2993,14 +2962,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
+Node* WasmGraphBuilder::ReturnCall(uint32_t index, Vector<Node*> args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Return Call to an imported function.
- return BuildImportCall(sig, args, nullptr, position, index, kReturnCall);
+ return BuildImportCall(sig, args, {}, position, index, kReturnCall);
}
// A direct tail call to a wasm function defined in this module.
@@ -3013,9 +2982,10 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
- uint32_t sig_index, Node** args,
+ uint32_t sig_index,
+ Vector<Node*> args,
wasm::WasmCodePosition position) {
- return BuildIndirectCall(table_index, sig_index, args, nullptr, position,
+ return BuildIndirectCall(table_index, sig_index, args, {}, position,
kReturnCall);
}
@@ -3062,6 +3032,14 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
return true;
}
+Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
+ if (mcgraph()->machine()->Is64()) {
+ value =
+ graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
+ }
+ return value;
+}
+
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
if (mcgraph()->machine()->Is64()) {
value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
@@ -3070,12 +3048,20 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
+ BuildSmiShiftBitsConstant32());
+ }
value = BuildChangeInt32ToIntPtr(value);
return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
+ BuildSmiShiftBitsConstant32());
+ }
return graph()->NewNode(mcgraph()->machine()->WordShl(),
Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
}
@@ -3084,16 +3070,32 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
+ return mcgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(mcgraph()->machine()->WordSar(), value,
- BuildSmiShiftBitsConstant());
- if (mcgraph()->machine()->Is64()) {
+ if (COMPRESS_POINTERS_BOOL) {
value =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
+ value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value,
+ BuildSmiShiftBitsConstant32());
+ } else {
+ value = BuildChangeSmiToIntPtr(value);
+ value = BuildTruncateIntPtrToInt32(value);
}
return value;
}
+Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ value = BuildChangeSmiToInt32(value);
+ return BuildChangeInt32ToIntPtr(value);
+ }
+ return graph()->NewNode(mcgraph()->machine()->WordSar(), value,
+ BuildSmiShiftBitsConstant());
+}
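These helpers are shift-based Smi tagging and untagging: with pointer compression the shift is done on 32-bit words, otherwise on full machine words. A rough arithmetic sketch (the shift amount is kSmiShiftSize + kSmiTagSize, assumed to be 1 for 31-bit Smis under pointer compression and 32 for 32-bit Smis on plain 64-bit builds):

#include <cstdint>

// Illustrative Smi tag/untag arithmetic; not the graph-building code above.
static intptr_t TagSmi(int32_t value, int shift) {
  // Shift on the unsigned type to sidestep signed-overflow pitfalls.
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << shift);
}

static int32_t UntagSmi(intptr_t smi, int shift) {
  // Arithmetic right shift recovers the signed payload.
  return static_cast<int32_t>(smi >> shift);
}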
+
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
@@ -3181,14 +3183,16 @@ Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
if (IsPhiWithMerge(tnode, merge)) {
AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
+ // Note that it is not safe to use {Buffer} here since this method is used
+ // via {CheckForException} while the {Buffer} is in use by another method.
uint32_t count = merge->InputCount();
// + 1 for the merge node.
- Vector<Node*> vals = Buffer(count + 1);
- for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
- vals[count - 1] = fnode;
- vals[count] = merge;
- return graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1,
- vals.begin());
+ base::SmallVector<Node*, 9> inputs(count + 1);
+ for (uint32_t j = 0; j < count - 1; j++) inputs[j] = tnode;
+ inputs[count - 1] = fnode;
+ inputs[count] = merge;
+ tnode = graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1,
+ inputs.begin());
}
return tnode;
}
@@ -3198,13 +3202,18 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
if (IsPhiWithMerge(tnode, merge)) {
AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
+ // Note that it is not safe to use {Buffer} here since this method is used
+ // via {CheckForException} while the {Buffer} is in use by another method.
uint32_t count = merge->InputCount();
- Vector<Node*> effects = Buffer(count);
+ // + 1 for the merge node.
+ base::SmallVector<Node*, 9> inputs(count + 1);
for (uint32_t j = 0; j < count - 1; j++) {
- effects[j] = tnode;
+ inputs[j] = tnode;
}
- effects[count - 1] = fnode;
- tnode = EffectPhi(count, effects.begin(), merge);
+ inputs[count - 1] = fnode;
+ inputs[count] = merge;
+ tnode = graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1,
+ inputs.begin());
}
return tnode;
}
@@ -3310,10 +3319,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* result =
graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
- if (mcgraph()->machine()->Is64()) {
- result =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result);
- }
+ result = BuildTruncateIntPtrToInt32(result);
return result;
}
@@ -3365,7 +3371,7 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
parameter_count, effect_, Control());
}
-Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
+Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
if (wasm::ValueTypes::IsReferenceType(global.type)) {
if (global.mutability && global.imported) {
@@ -3395,7 +3401,7 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
return result;
}
-Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
+Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
if (wasm::ValueTypes::IsReferenceType(global.type)) {
if (global.mutability && global.imported) {
@@ -4008,6 +4014,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
case wasm::kExprF64x2Neg:
return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+ case wasm::kExprF64x2Sqrt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Sqrt(), inputs[0]);
case wasm::kExprF64x2Add:
return graph()->NewNode(mcgraph()->machine()->F64x2Add(), inputs[0],
inputs[1]);
@@ -4044,6 +4052,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Ge:
return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
inputs[0]);
+ case wasm::kExprF64x2Qfma:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Qfma(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprF64x2Qfms:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Qfms(), inputs[0],
+ inputs[1], inputs[2]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4056,6 +4070,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->F32x4Abs(), inputs[0]);
case wasm::kExprF32x4Neg:
return graph()->NewNode(mcgraph()->machine()->F32x4Neg(), inputs[0]);
+ case wasm::kExprF32x4Sqrt:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Sqrt(), inputs[0]);
case wasm::kExprF32x4RecipApprox:
return graph()->NewNode(mcgraph()->machine()->F32x4RecipApprox(),
inputs[0]);
@@ -4101,6 +4117,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Ge:
return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1],
inputs[0]);
+ case wasm::kExprF32x4Qfma:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Qfma(), inputs[0],
+ inputs[1], inputs[2]);
+ case wasm::kExprF32x4Qfms:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Qfms(), inputs[0],
+ inputs[1], inputs[2]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
@@ -4459,6 +4481,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->S1x16AnyTrue(), inputs[0]);
case wasm::kExprS1x16AllTrue:
return graph()->NewNode(mcgraph()->machine()->S1x16AllTrue(), inputs[0]);
+ case wasm::kExprS8x16Swizzle:
+ return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0],
+ inputs[1]);
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
@@ -4492,13 +4517,23 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
case wasm::kExprI32x4ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(lane),
inputs[0], inputs[1]);
- case wasm::kExprI16x8ExtractLane:
+ case wasm::kExprI16x8ExtractLaneS:
+ return graph()->NewNode(
+ mcgraph()->machine()->SignExtendWord16ToInt32(),
+ graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane),
+ inputs[0]));
+ case wasm::kExprI16x8ExtractLaneU:
return graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane),
inputs[0]);
case wasm::kExprI16x8ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->I16x8ReplaceLane(lane),
inputs[0], inputs[1]);
- case wasm::kExprI8x16ExtractLane:
+ case wasm::kExprI8x16ExtractLaneS:
+ return graph()->NewNode(
+ mcgraph()->machine()->SignExtendWord8ToInt32(),
+ graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane),
+ inputs[0]));
+ case wasm::kExprI8x16ExtractLaneU:
return graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane),
inputs[0]);
case wasm::kExprI8x16ReplaceLane:
@@ -5076,7 +5111,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetI64ToBigIntCallDescriptor() {
if (!lowering_special_case_) {
- lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
if (lowering_special_case_->i64_to_bigint_call_descriptor) {
@@ -5112,7 +5147,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetBigIntToI64CallDescriptor() {
if (!lowering_special_case_) {
- lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>();
+ lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
if (lowering_special_case_->bigint_to_i64_call_descriptor) {
@@ -5613,7 +5648,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* function_index_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag,
- MachineType::TypeCompressedTagged());
+ MachineType::TypeCompressedTaggedSigned());
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
@@ -5622,13 +5657,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* jump_table_offset_smi = LOAD_RAW(
function_data,
WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag,
- MachineType::TypeCompressedTagged());
- Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
+ MachineType::TypeCompressedTaggedSigned());
+ Node* jump_table_offset = BuildChangeSmiToIntPtr(jump_table_offset_smi);
return jump_table_offset;
}
+ Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
+ Node* iterable, Node* context) {
+ Node* iterable_to_fixed_array =
+ BuildLoadBuiltinFromIsolateRoot(Builtins::kIterableToFixedArrayForWasm);
+ IterableToFixedArrayForWasmDescriptor interface_descriptor;
+ Node* length = BuildChangeUint31ToSmi(
+ Uint32Constant(static_cast<uint32_t>(sig->return_count())));
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoProperties, StubCallMode::kCallCodeObject);
+ return SetEffect(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), iterable_to_fixed_array,
+ iterable, length, context, Effect(), Control()));
+ }
+
void BuildJSToWasmWrapper(bool is_import) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
+ const int rets_count = static_cast<int>(sig_->return_count());
// Build the start and the JS parameter nodes.
SetEffect(SetControl(Start(wasm_count + 5)));
@@ -5662,8 +5714,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
const int args_count = wasm_count + 1; // +1 for wasm_code.
- Vector<Node*> args = Buffer(args_count);
- Node** rets;
+ base::SmallVector<Node*, 16> args(args_count);
+ base::SmallVector<Node*, 1> rets(rets_count);
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -5680,8 +5732,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load function index from {WasmExportedFunctionData}.
Node* function_index =
BuildLoadFunctionIndexFromExportedFunctionData(function_data);
- BuildImportCall(sig_, args.begin(), &rets, wasm::kNoCodePosition,
- function_index, kCallContinues);
+ BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, function_index, kCallContinues);
} else {
// Call to a wasm function defined in this module.
// The call target is the jump table slot for that function.
@@ -5693,8 +5745,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
args[0] = jump_table_slot;
- BuildWasmCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, nullptr,
- kNoRetpoline);
+ BuildWasmCall(sig_, VectorOf(args), VectorOf(rets), wasm::kNoCodePosition,
+ nullptr, kNoRetpoline);
}
// Clear the ThreadInWasm flag.
@@ -5765,7 +5817,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
sloppy_receiver = false;
V8_FALLTHROUGH; // fallthru
case WasmImportCallKind::kJSFunctionArityMatchSloppy: {
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
Node* function_context =
LOAD_RAW(callable_node,
@@ -5785,7 +5837,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
@@ -5805,7 +5857,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
sloppy_receiver = false;
V8_FALLTHROUGH; // fallthru
case WasmImportCallKind::kJSFunctionArityMismatchSloppy: {
- Vector<Node*> args = Buffer(wasm_count + 9);
+ base::SmallVector<Node*, 16> args(wasm_count + 9);
int pos = 0;
Node* function_context =
LOAD_RAW(callable_node,
@@ -5852,7 +5904,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
flags, Operator::kNoProperties);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = function_context;
args[pos++] = Effect();
args[pos++] = Control();
@@ -5866,7 +5918,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// === General case of unknown callable ==================================
// =======================================================================
case WasmImportCallKind::kUseCallBuiltin: {
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
args[pos++] =
BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny);
@@ -5879,7 +5931,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor::kNoFlags, Operator::kNoProperties);
// Convert wasm numbers to JS values.
- pos = AddArgumentNodes(args, pos, wasm_count, sig_);
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
// The native_context is sufficient here, because all kind of callables
// which depend on the context provide their own context. The context
@@ -5903,15 +5955,24 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(call);
SetSourcePosition(call, 0);
- // Convert the return value back.
- Node* val = sig_->return_count() == 0
- ? mcgraph()->Int32Constant(0)
- : FromJS(call, native_context, sig_->GetReturn());
-
- // Set the ThreadInWasm flag again.
- BuildModifyThreadInWasmFlag(true);
-
- Return(val);
+ // Convert the return value(s) back.
+ if (sig_->return_count() <= 1) {
+ Node* val = sig_->return_count() == 0
+ ? mcgraph()->Int32Constant(0)
+ : FromJS(call, native_context, sig_->GetReturn());
+ BuildModifyThreadInWasmFlag(true);
+ Return(val);
+ } else {
+ Node* fixed_array =
+ BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context);
+ base::SmallVector<Node*, 8> wasm_values(sig_->return_count());
+ for (unsigned i = 0; i < sig_->return_count(); ++i) {
+ wasm_values[i] = FromJS(LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i),
+ native_context, sig_->GetReturn(i));
+ }
+ BuildModifyThreadInWasmFlag(true);
+ Return(VectorOf(wasm_values));
+ }
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
return true;
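The multi-return branch above funnels the JS result through BuildMultiReturnFixedArrayFromIterable and then converts each slot with FromJS. A rough scalar model of that conversion step, assuming the iterable must yield exactly return_count() values; the types and names below are placeholders, not V8 code:

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch: materialize the JS result into exactly one slot per declared
// return type, then convert each slot to its wasm type. A length mismatch
// is an error in the real wrapper; here it is modelled with an exception.
enum class WasmType { kI32, kF64 };

struct JsValue { double number; };  // stand-in for an arbitrary JS value

static double FromJsSketch(const JsValue& v, WasmType type) {
  return type == WasmType::kI32
             ? static_cast<double>(static_cast<int32_t>(v.number))
             : v.number;
}

static std::vector<double> ConvertReturns(const std::vector<JsValue>& iterable,
                                          const std::vector<WasmType>& returns) {
  if (iterable.size() != returns.size()) {
    throw std::runtime_error("iterable must yield exactly return_count values");
  }
  std::vector<double> wasm_values(returns.size());
  for (size_t i = 0; i < returns.size(); ++i) {
    wasm_values[i] = FromJsSketch(iterable[i], returns[i]);
  }
  return wasm_values;
}

int main() {
  std::vector<JsValue> js_result = {{1.9}, {2.5}};
  std::vector<double> vals =
      ConvertReturns(js_result, {WasmType::kI32, WasmType::kF64});
  return vals.size() == 2 ? 0 : 1;
}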
@@ -6006,7 +6067,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (return_count == 0) {
Return(Int32Constant(0));
} else {
- Vector<Node*> returns = Buffer(return_count);
+ base::SmallVector<Node*, 8> returns(return_count);
offset = 0;
for (size_t i = 0; i < return_count; ++i) {
wasm::ValueType type = sig_->GetReturn(i);
@@ -6016,7 +6077,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
returns[i] = val;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- Return(returns);
+ Return(VectorOf(returns));
}
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
@@ -6078,7 +6139,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (return_count == 0) {
Return(Int32Constant(0));
} else {
- Vector<Node*> returns = Buffer(return_count);
+ base::SmallVector<Node*, 8> returns(return_count);
offset = 0;
for (size_t i = 0; i < return_count; ++i) {
wasm::ValueType type = sig_->GetReturn(i);
@@ -6088,7 +6149,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
returns[i] = val;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- Return(returns);
+ Return(VectorOf(returns));
}
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
@@ -6130,10 +6191,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// Call the underlying closure.
- Vector<Node*> args = Buffer(wasm_count + 7);
+ base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = graph()->NewNode(mcgraph()->common()->HeapConstant(
- BUILTIN_CODE(isolate, Call_ReceiverIsAny)));
+ args[pos++] =
+ BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny);
args[pos++] = callable;
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver
@@ -6158,14 +6219,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = SetEffect(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
- // TODO(wasm): Extend this to support multi-return.
- DCHECK_LE(sig_->return_count(), 1);
-
// Convert return JS values to wasm numbers and back to JS values.
- Node* jsval =
- sig_->return_count() == 0
- ? BuildLoadUndefinedValueFromInstance()
- : ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+ Node* jsval;
+ if (sig_->return_count() == 0) {
+ jsval = BuildLoadUndefinedValueFromInstance();
+ } else if (sig_->return_count() == 1) {
+ jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn());
+ } else {
+ Node* fixed_array =
+ BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
+ int32_t return_count = static_cast<int32_t>(sig_->return_count());
+ Node* size =
+ graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* result_fixed_array =
+ BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1);
+ for (unsigned i = 0; i < sig_->return_count(); ++i) {
+ const auto& type = sig_->GetReturn(i);
+ Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i);
+ Node* cast = ToJS(FromJS(elem, context, type), type);
+ STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast);
+ }
+ jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray,
+ context, &result_fixed_array, 1,
+ effect_, Control());
+ }
Return(jsval);
}
@@ -6184,7 +6261,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
kNoWriteBarrier);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- Vector<Node*> args = Buffer(wasm_arg_count + 4);
+ base::SmallVector<Node*, 16> args(wasm_arg_count + 4);
int pos = 0;
args[pos++] = code_entry;
@@ -6222,14 +6299,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
pos = 0;
offset = 0;
for (wasm::ValueType type : sig_->returns()) {
- StoreRepresentation store_rep(
- wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier);
Node* value = sig_->return_count() == 1
? call
: graph()->NewNode(mcgraph()->common()->Projection(pos),
call, Control());
- SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
- arg_buffer, Int32Constant(offset), value,
+ SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
+ Int32Constant(offset), value,
Effect(), Control()));
offset += wasm::ValueTypes::ElementSizeInBytes(type);
pos++;
@@ -6287,7 +6362,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
// Create the Graph.
//----------------------------------------------------------------------------
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6702,7 +6777,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6749,7 +6824,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
- base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@@ -6916,6 +6991,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
auto result = info.ReleaseWasmCompilationResult();
+ CHECK_NOT_NULL(result); // Compilation expected to succeed.
DCHECK_EQ(wasm::ExecutionTier::kTurbofan, result->result_tier);
return std::move(*result);
}
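Across the wasm-compiler.cc hunks above, the old Buffer()/Realloc() scratch buffer and Node*** out-parameters are replaced by caller-allocated base::SmallVector storage passed as Vector<Node*> views via VectorOf. A standalone sketch of that calling convention, using simplified stand-in types rather than the real V8 classes:

#include <cstddef>
#include <vector>

// Stand-ins (not the real V8 classes) for the idiom these hunks move to:
// callers allocate args/rets storage up front and pass non-owning views,
// replacing a shared Node** scratch buffer plus a Node*** out-parameter.
struct Node;  // opaque graph node, as in the compiler

template <typename T>
struct View {  // minimal analogue of Vector<T>
  T* data;
  size_t size;
  T& operator[](size_t i) { return data[i]; }
};

template <typename T>
View<T> ViewOf(std::vector<T>& v) { return {v.data(), v.size()}; }  // like VectorOf

// Old shape (sketch):  Node* BuildWasmCall(..., Node** args, Node*** rets, ...);
// New shape (sketch):  Node* BuildWasmCall(..., View<Node*> args, View<Node*> rets, ...);
static void BuildCallSketch(View<Node*> args, View<Node*> rets) {
  // The callee writes produced return nodes straight into caller-owned slots.
  for (size_t i = 0; i < rets.size; ++i) {
    rets[i] = args.size > 0 ? args[0] : nullptr;
  }
}

int main() {
  std::vector<Node*> args(3, nullptr);
  std::vector<Node*> rets(2, nullptr);
  BuildCallSketch(ViewOf(args), ViewOf(rets));  // no Buffer()/Realloc() needed
  return 0;
}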
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index dd86ea1499..de0ca58c23 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -179,16 +179,6 @@ class WasmGraphBuilder {
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
wasm::FunctionSig* sig, compiler::SourcePositionTable* spt = nullptr);
- Vector<Node*> Buffer(size_t count) {
- if (count > cur_bufsize_) {
- size_t new_size = count + cur_bufsize_ + 5;
- cur_buffer_ =
- reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
- cur_bufsize_ = new_size;
- }
- return {cur_buffer_, count};
- }
-
//-----------------------------------------------------------------------
// Operations independent of {control} or {effect}.
//-----------------------------------------------------------------------
@@ -199,11 +189,11 @@ class WasmGraphBuilder {
Node* TerminateLoop(Node* effect, Node* control);
Node* TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
- Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
+ Node* Phi(wasm::ValueType type, unsigned count, Node** vals_and_control);
Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge,
Node* tnode, Node* fnode);
Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode);
- Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* EffectPhi(unsigned count, Node** effects_and_control);
Node* RefNull();
Node* RefFunc(uint32_t function_index);
Node* Uint32Constant(uint32_t value);
@@ -223,8 +213,9 @@ class WasmGraphBuilder {
Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
Node* LoadExceptionTagFromTable(uint32_t exception_index);
Node* GetExceptionTag(Node* except_obj);
- Vector<Node*> GetExceptionValues(Node* except_obj,
- const wasm::WasmException* exception);
+ Node* GetExceptionValues(Node* except_obj,
+ const wasm::WasmException* exception,
+ Vector<Node*> values_out);
bool IsPhiWithMerge(Node* phi, Node* merge);
bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
@@ -275,20 +266,21 @@ class WasmGraphBuilder {
}
Node* Unreachable(wasm::WasmCodePosition position);
- Node* CallDirect(uint32_t index, Node** args, Node*** rets,
+ Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position);
- Node* CallIndirect(uint32_t table_index, uint32_t sig_index, Node** args,
- Node*** rets, wasm::WasmCodePosition position);
+ Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
+ Vector<Node*> args, Vector<Node*> rets,
+ wasm::WasmCodePosition position);
- Node* ReturnCall(uint32_t index, Node** args,
+ Node* ReturnCall(uint32_t index, Vector<Node*> args,
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
- Node** args, wasm::WasmCodePosition position);
+ Vector<Node*> args, wasm::WasmCodePosition position);
Node* Invert(Node* node);
- Node* GetGlobal(uint32_t index);
- Node* SetGlobal(uint32_t index, Node* val);
+ Node* GlobalGet(uint32_t index);
+ Node* GlobalSet(uint32_t index, Node* val);
Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
Node* TableSet(uint32_t table_index, Node* index, Node* val,
@@ -427,8 +419,6 @@ class WasmGraphBuilder {
void RemoveBytecodePositionDecorator();
protected:
- static const int kDefaultBufferSize = 16;
-
Zone* const zone_;
MachineGraph* const mcgraph_;
wasm::CompilationEnv* const env_;
@@ -444,9 +434,6 @@ class WasmGraphBuilder {
SetOncePointer<Node> isolate_root_node_;
SetOncePointer<const Operator> stack_check_call_operator_;
- Node** cur_buffer_;
- size_t cur_bufsize_;
- Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
const bool untrusted_code_mitigations_ = true;
@@ -496,28 +483,29 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
- Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
+ Node* BuildCallNode(wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
// Helper function for {BuildIndirectCall}.
void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
Node** ift_sig_ids, Node** ift_targets,
Node** ift_instances);
- Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
- Node*** rets, wasm::WasmCodePosition position,
+ Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index,
+ Vector<Node*> args, Vector<Node*> rets,
+ wasm::WasmCodePosition position,
IsReturnCall continuation);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, Node* instance_node,
- UseRetpoline use_retpoline);
- Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args,
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ Node* instance_node, UseRetpoline use_retpoline);
+ Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
- Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, int func_index,
- IsReturnCall continuation);
- Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position, Node* func_index,
- IsReturnCall continuation);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ int func_index, IsReturnCall continuation);
+ Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Vector<Node*> rets, wasm::WasmCodePosition position,
+ Node* func_index, IsReturnCall continuation);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -574,11 +562,14 @@ class WasmGraphBuilder {
MachineType result_type, wasm::TrapReason trap_zero,
wasm::WasmCodePosition position);
+ Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
+ Node* BuildSmiShiftBitsConstant32();
Node* BuildChangeSmiToInt32(Node* value);
+ Node* BuildChangeSmiToIntPtr(Node* value);
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
@@ -599,15 +590,8 @@ class WasmGraphBuilder {
Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index);
- Vector<Node*> Realloc(Node* const* buffer, size_t old_count,
- size_t new_count) {
- DCHECK_GE(new_count, old_count); // Only support growing.
- Vector<Node*> buf = Buffer(new_count);
- if (buf.begin() != buffer) {
- memcpy(buf.begin(), buffer, old_count * sizeof(Node*));
- }
- return buf;
- }
+ Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
+ Node* iterable, Node* context);
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
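The Phi/EffectPhi declarations above now take a single vals_and_control array instead of separate value and control arguments. A speculative sketch of the packed layout that naming suggests (the control input appended after the count values); names and types here are placeholders, not the real TurboFan classes:

#include <vector>

// Assumption, labelled as such: the first `count` entries are the value (or
// effect) inputs and the last entry is the control input, so only one buffer
// needs to be passed around.
struct Node { int id; };

static Node* PhiSketch(unsigned count, Node** vals_and_control) {
  Node** values = vals_and_control;         // first `count` entries
  Node* control = vals_and_control[count];  // assumed: control appended last
  (void)values;
  return control;  // stand-in for creating the actual Phi node
}

int main() {
  Node a{1}, b{2}, merge{3};
  std::vector<Node*> packed = {&a, &b, &merge};  // two values + control
  return PhiSketch(2, packed.data()) == &merge ? 0 : 1;
}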
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index 63d58eb99f..68036f116d 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -33,6 +33,8 @@ class V8_EXPORT_PRIVATE ZoneStats final {
zone_ = nullptr;
}
+ ZoneStats* zone_stats() const { return zone_stats_; }
+
private:
const char* zone_name_;
ZoneStats* const zone_stats_;