Diffstat (limited to 'deps/v8/src/compiler/memory-optimizer.cc')
 -rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 242
 1 file changed, 194 insertions, 48 deletions
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index a3f47a8acb..29cbb4d26c 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -4,13 +4,14 @@
#include "src/compiler/memory-optimizer.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
-#include "src/interface-descriptors.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,8 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding)
+ AllocationFolding allocation_folding,
+ const char* function_debug_name)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
@@ -26,7 +28,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
- allocation_folding_(allocation_folding) {}
+ allocation_folding_(allocation_folding),
+ function_debug_name_(function_debug_name) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -58,7 +61,21 @@ void MemoryOptimizer::AllocationGroup::Add(Node* node) {
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
- return node_ids_.find(node->id()) != node_ids_.end();
+ // Additions should stay within the same allocated object, so it's safe to
+ // ignore them.
+ while (node_ids_.find(node->id()) == node_ids_.end()) {
+ switch (node->opcode()) {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt64Add:
+ node = NodeProperties::GetValueInput(node, 0);
+ break;
+ default:
+ return false;
+ }
+ }
+ return true;
}
MemoryOptimizer::AllocationState::AllocationState()
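
A minimal standalone sketch of the membership walk the new AllocationGroup::Contains performs, using toy stand-ins (ToyNode, GroupContains and the Op enum are illustrative names, not part of the patch; inputs are assumed non-null):

#include <cstdint>
#include <unordered_set>

enum class Op {
  kAllocateRaw,
  kBitcastTaggedToWord,
  kBitcastWordToTagged,
  kInt32Add,
  kInt64Add,
  kOther
};

struct ToyNode {
  uint32_t id;
  Op op;
  const ToyNode* first_input;  // the value input the walk follows
};

// Returns true if {node} is (an address derived from) a member of {ids}.
bool GroupContains(const std::unordered_set<uint32_t>& ids, const ToyNode* node) {
  // Additions and bitcasts stay within the same allocated object, so the
  // walk skips through them back to the underlying allocation node.
  while (ids.count(node->id) == 0) {
    switch (node->op) {
      case Op::kBitcastTaggedToWord:
      case Op::kBitcastWordToTagged:
      case Op::kInt32Add:
      case Op::kInt64Add:
        node = node->first_input;
        break;
      default:
        return false;
    }
  }
  return true;
}
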
@@ -86,6 +103,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
+ case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
@@ -94,6 +112,10 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
+ // TODO(tebbi): Store nodes might do a bump-pointer allocation.
+ // We should introduce a special bump-pointer store node to
+ // differentiate that.
+ case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
case IrOpcode::kTaggedPoisonOnSpeculation:
@@ -101,6 +123,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnsafePointerAdd:
case IrOpcode::kUnreachable:
+ case IrOpcode::kStaticAssert:
case IrOpcode::kWord32AtomicAdd:
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicCompareExchange:
@@ -136,29 +159,17 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kCallWithCallerSavedRegisters:
return !(CallDescriptorOf(node->op())->flags() &
CallDescriptor::kNoAllocate);
-
- case IrOpcode::kStore:
- // Store is not safe because it could be part of CSA's bump pointer
- // allocation(?).
- return true;
-
default:
break;
}
return true;
}
-bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
- Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
-
+Node* SearchAllocatingNode(Node* start, Node* limit, Zone* temp_zone) {
ZoneQueue<Node*> queue(temp_zone);
ZoneSet<Node*> visited(temp_zone);
- visited.insert(loop_effect_phi);
-
- // Start the effect chain walk from the loop back edges.
- for (int i = 1; i < control->InputCount(); ++i) {
- queue.push(loop_effect_phi->InputAt(i));
- }
+ visited.insert(limit);
+ queue.push(start);
while (!queue.empty()) {
Node* const current = queue.front();
@@ -166,16 +177,40 @@ bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
if (visited.find(current) == visited.end()) {
visited.insert(current);
- if (CanAllocate(current)) return true;
+ if (CanAllocate(current)) {
+ return current;
+ }
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
queue.push(NodeProperties::GetEffectInput(current, i));
}
}
}
+ return nullptr;
+}
+
+bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
+ Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
+ // Start the effect chain walk from the loop back edges.
+ for (int i = 1; i < control->InputCount(); ++i) {
+ if (SearchAllocatingNode(loop_effect_phi->InputAt(i), loop_effect_phi,
+ temp_zone) != nullptr) {
+ return true;
+ }
+ }
return false;
}
+Node* EffectPhiForPhi(Node* phi) {
+ Node* control = NodeProperties::GetControlInput(phi);
+ for (Node* use : control->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ return use;
+ }
+ }
+ return nullptr;
+}
+
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
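
For context, a toy-graph sketch of the breadth-first effect-chain walk that the factored-out SearchAllocatingNode performs (ToyEffectNode and ToySearchAllocatingNode are illustrative names only):

#include <queue>
#include <unordered_set>
#include <vector>

struct ToyEffectNode {
  int id;
  bool can_allocate;
  std::vector<ToyEffectNode*> effect_inputs;
};

// Breadth-first walk backwards over effect inputs, starting at {start} and
// treating {limit} as already visited; returns the first node that may
// allocate, or nullptr if none is reachable.
ToyEffectNode* ToySearchAllocatingNode(ToyEffectNode* start, ToyEffectNode* limit) {
  std::queue<ToyEffectNode*> queue;
  std::unordered_set<int> visited{limit->id};
  queue.push(start);
  while (!queue.empty()) {
    ToyEffectNode* current = queue.front();
    queue.pop();
    if (visited.insert(current->id).second) {
      if (current->can_allocate) return current;
      for (ToyEffectNode* input : current->effect_inputs) queue.push(input);
    }
  }
  return nullptr;
}
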
@@ -192,10 +227,14 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitCall(node, state);
case IrOpcode::kCallWithCallerSavedRegisters:
return VisitCallWithCallerSavedRegisters(node, state);
+ case IrOpcode::kLoadFromObject:
+ return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
+ case IrOpcode::kStoreToObject:
+ return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
@@ -223,13 +262,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
gasm()->Reset(effect, control);
- AllocationType allocation = AllocationTypeOf(node->op());
+ const AllocateParameters& allocation = AllocateParametersOf(node->op());
+ AllocationType allocation_type = allocation.allocation_type();
// Propagate tenuring from outer allocations to inner allocations, i.e.
// when we allocate an object in old space and store a newly allocated
// child object into the pretenured object, then the newly allocated
// child object also should get pretenured to old space.
- if (allocation == AllocationType::kOld) {
+ if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
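
A minimal sketch of the tenuring-propagation rule applied here, with toy types standing in for the compiler classes (ToyAllocation and ToyStoreFieldUse are illustrative only): a young allocation stored directly into a field of an old-space allocation is itself retenured to old space.

enum class ToyAllocationType { kYoung, kOld };

struct ToyAllocation {
  ToyAllocationType type;
};

struct ToyStoreFieldUse {
  const ToyAllocation* object;  // input 0 of the StoreField
  const ToyAllocation* value;   // input 1 of the StoreField
};

ToyAllocationType PropagateTenuring(const ToyAllocation& child,
                                    const ToyStoreFieldUse& use) {
  // Only a use as the *stored value* (not as the written-to object) of a
  // store into an old-space parent promotes the child allocation.
  if (use.value == &child && use.object->type == ToyAllocationType::kOld) {
    return ToyAllocationType::kOld;
  }
  return child.type;
}
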
@@ -242,14 +282,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
} else {
- DCHECK_EQ(AllocationType::kYoung, allocation);
+ DCHECK_EQ(AllocationType::kYoung, allocation_type);
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* const parent = user->InputAt(0);
if (parent->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(parent->op()) == AllocationType::kOld) {
- allocation = AllocationType::kOld;
+ allocation_type = AllocationType::kOld;
break;
}
}
@@ -258,22 +298,22 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
- allocation == AllocationType::kYoung
+ allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
- allocation == AllocationType::kYoung
+ allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
- if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
- state->group()->allocation() == allocation) {
+ state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
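
As a standalone arithmetic sketch of the folding gate above (toy state struct and an illustrative size constant, not the compiler's API):

#include <cstdint>
#include <optional>

constexpr intptr_t kToyMaxRegularHeapObjectSize = 1 << 17;  // illustrative value only

struct ToyOpenState {
  intptr_t group_size;  // bytes already reserved by the open allocation group
  intptr_t top;         // untagged address just past the last folded object
};

// Returns the grown state if the new object still fits into the group's
// single reservation, i.e. the combined size stays regular-object-sized.
std::optional<ToyOpenState> TryFold(const ToyOpenState& state, intptr_t object_size) {
  if (object_size < 0 || object_size > kToyMaxRegularHeapObjectSize) return std::nullopt;
  if (state.group_size > kToyMaxRegularHeapObjectSize - object_size) return std::nullopt;
  // The folded object is placed at the current top; only the reservation grows.
  return ToyOpenState{state.group_size + object_size, state.top + object_size};
}
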
@@ -331,7 +371,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
- Node* target = allocation == AllocationType::kYoung
+ Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
@@ -363,7 +403,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Start a new allocation group.
AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation, size, zone());
+ new (zone()) AllocationGroup(value, allocation_type, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
@@ -382,6 +422,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ __ GotoIfNot(
+ __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
+ &call_runtime);
+ }
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
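
A hedged sketch of the bump-pointer fast path emitted above, written as plain C++ over a toy top/limit pair (names and the size constant are illustrative only):

#include <cstdint>

constexpr intptr_t kToyMaxRegularHeapObjectSize = 1 << 17;  // illustrative value only
constexpr intptr_t kToyHeapObjectTag = 1;

// Returns a tagged address on success, or 0 to signal "call the runtime".
intptr_t TryBumpPointerAllocate(intptr_t* top, intptr_t limit, intptr_t size,
                                bool allow_large_objects) {
  intptr_t new_top = *top + size;
  // Unsigned comparison, mirroring the UintLessThan checks emitted above.
  if (!(static_cast<uintptr_t>(new_top) < static_cast<uintptr_t>(limit))) {
    return 0;
  }
  // With a dynamic size and AllowLargeObjects::kTrue, anything that is not
  // regular-object-sized has to be allocated by the runtime instead.
  if (allow_large_objects &&
      !(static_cast<uintptr_t>(size) <
        static_cast<uintptr_t>(kToyMaxRegularHeapObjectSize))) {
    return 0;
  }
  intptr_t result = *top + kToyHeapObjectTag;  // tag the old top
  *top = new_top;                              // publish the new top
  return result;
}
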
@@ -389,7 +434,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- Node* target = allocation == AllocationType::kYoung
+ Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
@@ -408,7 +453,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Create an unfoldable allocation group.
AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation, zone());
+ new (zone()) AllocationGroup(value, allocation_type, zone());
state = AllocationState::Closed(group, zone());
}
@@ -434,6 +479,32 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
node->Kill();
}
+void MemoryOptimizer::VisitLoadFromObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* offset = node->InputAt(1);
+ node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreToObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
#undef __
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
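
An illustration of the offset rewrite in VisitLoadFromObject/VisitStoreToObject above: a tagged pointer is the object's address plus kHeapObjectTag, so the lowered machine access subtracts the tag from the field offset rather than untagging the pointer (toy code, not the compiler's API):

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr std::ptrdiff_t kToyHeapObjectTag = 1;

int32_t ToyLoadFromObject(const char* object_start, std::ptrdiff_t field_offset) {
  const char* tagged = object_start + kToyHeapObjectTag;  // the "tagged" pointer
  // tagged + (field_offset - kToyHeapObjectTag) == object_start + field_offset,
  // which is exactly the effective address the rewritten machine Load computes.
  int32_t value;
  std::memcpy(&value, tagged + (field_offset - kToyHeapObjectTag), sizeof(value));
  return value;
}
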
@@ -461,13 +532,13 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
+ MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity) &&
- access.machine_type.representation() !=
- MachineRepresentation::kTaggedPointer) {
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(access.machine_type));
+ type.representation() != MachineRepresentation::kTaggedPointer &&
+ type.representation() != MachineRepresentation::kCompressedPointer) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
@@ -477,13 +548,13 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
+ MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity) &&
- access.machine_type.representation() !=
- MachineRepresentation::kTaggedPointer) {
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(access.machine_type));
+ type.representation() != MachineRepresentation::kTaggedPointer &&
+ type.representation() != MachineRepresentation::kCompressedPointer) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
@@ -494,8 +565,9 @@ void MemoryOptimizer::VisitStoreElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
- WriteBarrierKind write_barrier_kind =
- ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
@@ -508,8 +580,9 @@ void MemoryOptimizer::VisitStoreField(Node* node,
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
- WriteBarrierKind write_barrier_kind =
- ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* value = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
@@ -522,8 +595,9 @@ void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- object, state, representation.write_barrier_kind());
+ node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
@@ -552,13 +626,85 @@ Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
return index;
}
+namespace {
+
+bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
+ while (true) {
+ switch (value->opcode()) {
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ case IrOpcode::kChangeTaggedToCompressedSigned:
+ return false;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ case IrOpcode::kChangeTaggedToCompressed:
+ value = NodeProperties::GetValueInput(value, 0);
+ continue;
+ case IrOpcode::kHeapConstant: {
+ RootIndex root_index;
+ if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
+ &root_index) &&
+ RootsTable::IsImmortalImmovable(root_index)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+ }
+}
+
+void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
+ Zone* temp_zone) {
+ std::stringstream str;
+ str << "MemoryOptimizer could not remove write barrier for node #"
+ << node->id() << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << node->id() << " to break in CSA code.\n";
+ Node* object_position = object;
+ if (object_position->opcode() == IrOpcode::kPhi) {
+ object_position = EffectPhiForPhi(object_position);
+ }
+ Node* allocating_node = nullptr;
+ if (object_position && object_position->op()->EffectOutputCount() > 0) {
+ allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
+ }
+ if (allocating_node) {
+ str << "\n There is a potentially allocating node in between:\n";
+ str << " " << *allocating_node << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << allocating_node->id() << " to break there.\n";
+ if (allocating_node->opcode() == IrOpcode::kCall) {
+ str << " If this is a never-allocating runtime call, you can add an "
+ "exception to Runtime::MayAllocate.\n";
+ }
+ } else {
+ str << "\n It seems the store happened to something different than a "
+ "direct "
+ "allocation:\n";
+ str << " " << *object << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << object->id() << " to break there.\n";
+ }
+ FATAL("%s", str.str().c_str());
+}
+
+} // namespace
+
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
- Node* object, AllocationState const* state,
+ Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
+ if (!ValueNeedsWriteBarrier(value, isolate())) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
+ WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
+ }
return write_barrier_kind;
}
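
Summing up the write-barrier decision above as a hedged, toy-typed sketch (ToyBarrier and ToyStoreInfo are illustrative names): the barrier is dropped when the written-to object lives in the current young-generation allocation group, or when the stored value can never be a young heap pointer (Smis, immortal immovable roots); an asserted-away barrier that survives both checks is a fatal error.

enum class ToyBarrier { kNone, kFull, kAssertNone };

struct ToyStoreInfo {
  bool object_in_open_young_group;  // state->IsYoungGenerationAllocation() && group->Contains(object)
  bool value_needs_barrier;         // ValueNeedsWriteBarrier(value, isolate)
  bool assert_failed = false;       // set when an asserted-away barrier survives
};

ToyBarrier ComputeToyBarrierKind(ToyStoreInfo& info, ToyBarrier requested) {
  if (info.object_in_open_young_group) requested = ToyBarrier::kNone;
  if (!info.value_needs_barrier) requested = ToyBarrier::kNone;
  // In the patch, a kAssertNoWriteBarrier kind surviving to this point is
  // fatal: WriteBarrierAssertFailed() prints the offending node and aborts.
  if (requested == ToyBarrier::kAssertNone) info.assert_failed = true;
  return requested;
}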