Diffstat (limited to 'deps/v8/src/compiler/memory-optimizer.cc')
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 546
1 file changed, 79 insertions(+), 467 deletions(-)
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 8684f2ce3c..6527dfb287 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -11,90 +11,12 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
-#include "src/compiler/simplified-operator.h"
 #include "src/roots/roots-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
-                                 PoisoningMitigationLevel poisoning_level,
-                                 AllocationFolding allocation_folding,
-                                 const char* function_debug_name,
-                                 TickCounter* tick_counter)
-    : jsgraph_(jsgraph),
-      empty_state_(AllocationState::Empty(zone)),
-      pending_(zone),
-      tokens_(zone),
-      zone_(zone),
-      graph_assembler_(jsgraph, nullptr, nullptr, zone),
-      poisoning_level_(poisoning_level),
-      allocation_folding_(allocation_folding),
-      function_debug_name_(function_debug_name),
-      tick_counter_(tick_counter) {}
-
-void MemoryOptimizer::Optimize() {
-  EnqueueUses(graph()->start(), empty_state());
-  while (!tokens_.empty()) {
-    Token const token = tokens_.front();
-    tokens_.pop();
-    VisitNode(token.node, token.state);
-  }
-  DCHECK(pending_.empty());
-  DCHECK(tokens_.empty());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
-                                                  AllocationType allocation,
-                                                  Zone* zone)
-    : node_ids_(zone), allocation_(allocation), size_(nullptr) {
-  node_ids_.insert(node->id());
-}
-
-MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
-                                                  AllocationType allocation,
-                                                  Node* size, Zone* zone)
-    : node_ids_(zone), allocation_(allocation), size_(size) {
-  node_ids_.insert(node->id());
-}
-
-void MemoryOptimizer::AllocationGroup::Add(Node* node) {
-  node_ids_.insert(node->id());
-}
-
-bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
-  // Additions should stay within the same allocated object, so it's safe to
-  // ignore them.
-  while (node_ids_.find(node->id()) == node_ids_.end()) {
-    switch (node->opcode()) {
-      case IrOpcode::kBitcastTaggedToWord:
-      case IrOpcode::kBitcastWordToTagged:
-      case IrOpcode::kInt32Add:
-      case IrOpcode::kInt64Add:
-        node = NodeProperties::GetValueInput(node, 0);
-        break;
-      default:
-        return false;
-    }
-  }
-  return true;
-}
-
-MemoryOptimizer::AllocationState::AllocationState()
-    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
-    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
-
-MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
-                                                  intptr_t size, Node* top)
-    : group_(group), size_(size), top_(top) {}
-
-bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
-  return group() && group()->IsYoungGenerationAllocation();
-}
-
 namespace {
 
 bool CanAllocate(const Node* node) {
@@ -221,8 +143,67 @@ Node* EffectPhiForPhi(Node* phi) {
   return nullptr;
 }
 
+void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
+                              Zone* temp_zone) {
+  std::stringstream str;
+  str << "MemoryOptimizer could not remove write barrier for node #"
+      << node->id() << "\n";
+  str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
+      << node->id() << " to break in CSA code.\n";
+  Node* object_position = object;
+  if (object_position->opcode() == IrOpcode::kPhi) {
+    object_position = EffectPhiForPhi(object_position);
+  }
+  Node* allocating_node = nullptr;
+  if (object_position && object_position->op()->EffectOutputCount() > 0) {
+    allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
+  }
+  if (allocating_node) {
+    str << "\n  There is a potentially allocating node in between:\n";
+    str << "    " << *allocating_node << "\n";
+    str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
+        << allocating_node->id() << " to break there.\n";
+    if (allocating_node->opcode() == IrOpcode::kCall) {
+      str << "  If this is a never-allocating runtime call, you can add an "
+             "exception to Runtime::MayAllocate.\n";
+    }
+  } else {
+    str << "\n  It seems the store happened to something different than a "
+           "direct "
+           "allocation:\n";
+    str << "    " << *object << "\n";
+    str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
+        << object->id() << " to break there.\n";
+  }
+  FATAL("%s", str.str().c_str());
+}
+
 }  // namespace
 
+MemoryOptimizer::MemoryOptimizer(
+    JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+    MemoryLowering::AllocationFolding allocation_folding,
+    const char* function_debug_name, TickCounter* tick_counter)
+    : memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding,
+                       WriteBarrierAssertFailed, function_debug_name),
+      jsgraph_(jsgraph),
+      empty_state_(AllocationState::Empty(zone)),
+      pending_(zone),
+      tokens_(zone),
+      zone_(zone),
+      tick_counter_(tick_counter) {}
+
+void MemoryOptimizer::Optimize() {
+  EnqueueUses(graph()->start(), empty_state());
+  while (!tokens_.empty()) {
+    Token const token = tokens_.front();
+    tokens_.pop();
+    VisitNode(token.node, token.state);
+  }
+  DCHECK(pending_.empty());
+  DCHECK(tokens_.empty());
+}
+
 void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
   tick_counter_->DoTick();
   DCHECK(!node->IsDead());
@@ -259,8 +240,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
   DCHECK_EQ(0, node->op()->EffectOutputCount());
 }
 
-#define __ gasm()->
-
 bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
                                                      const Edge edge) {
   if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
@@ -293,13 +272,6 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
 void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                        AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
-  Node* value;
-  Node* size = node->InputAt(0);
-  Node* effect = node->InputAt(1);
-  Node* control = node->InputAt(2);
-
-  gasm()->Reset(effect, control);
-
   const AllocateParameters& allocation = AllocateParametersOf(node->op());
   AllocationType allocation_type = allocation.allocation_type();
@@ -310,7 +282,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
   if (allocation_type == AllocationType::kOld) {
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
-
       if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
         Node* child = user->InputAt(1);
         // In Pointer Compression we might have a Compress node between an
@@ -339,299 +310,62 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     }
   }
 
-  Node* allocate_builtin;
-  if (allocation_type == AllocationType::kYoung) {
-    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
-      allocate_builtin = __ AllocateInYoungGenerationStubConstant();
-    } else {
-      allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
-    }
-  } else {
-    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
-      allocate_builtin = __ AllocateInOldGenerationStubConstant();
-    } else {
-      allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
-    }
-  }
-
-  // Determine the top/limit addresses.
-  Node* top_address = __ ExternalConstant(
-      allocation_type == AllocationType::kYoung
-          ? ExternalReference::new_space_allocation_top_address(isolate())
-          : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = __ ExternalConstant(
-      allocation_type == AllocationType::kYoung
-          ? ExternalReference::new_space_allocation_limit_address(isolate())
-          : ExternalReference::old_space_allocation_limit_address(isolate()));
-
-  // Check if we can fold this allocation into a previous allocation represented
-  // by the incoming {state}.
-  IntPtrMatcher m(size);
-  if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
-    intptr_t const object_size = m.Value();
-    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
-        state->size() <= kMaxRegularHeapObjectSize - object_size &&
-        state->group()->allocation() == allocation_type) {
-      // We can fold this Allocate {node} into the allocation {group}
-      // represented by the given {state}. Compute the upper bound for
-      // the new {state}.
-      intptr_t const state_size = state->size() + object_size;
-
-      // Update the reservation check to the actual maximum upper bound.
-      AllocationGroup* const group = state->group();
-      if (machine()->Is64()) {
-        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
-          NodeProperties::ChangeOp(group->size(),
-                                   common()->Int64Constant(state_size));
-        }
-      } else {
-        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
-          NodeProperties::ChangeOp(
-              group->size(),
-              common()->Int32Constant(static_cast<int32_t>(state_size)));
-        }
-      }
-
-      // Update the allocation top with the new object allocation.
-      // TODO(bmeurer): Defer writing back top as much as possible.
-      Node* top = __ IntAdd(state->top(), size);
-      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
-                                   kNoWriteBarrier),
-               top_address, __ IntPtrConstant(0), top);
-
-      // Compute the effective inner allocated address.
-      value = __ BitcastWordToTagged(
-          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
-
-      // Extend the allocation {group}.
-      group->Add(value);
-      state = AllocationState::Open(group, state_size, top, zone());
-    } else {
-      auto call_runtime = __ MakeDeferredLabel();
-      auto done = __ MakeLabel(MachineType::PointerRepresentation());
-
-      // Setup a mutable reservation size node; will be patched as we fold
-      // additional allocations into this new group.
-      Node* size = __ UniqueIntPtrConstant(object_size);
-
-      // Load allocation top and limit.
-      Node* top =
-          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
-      Node* limit =
-          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
-      // Check if we need to collect garbage before we can start bump pointer
-      // allocation (always done for folded allocations).
-      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
-
-      __ GotoIfNot(check, &call_runtime);
-      __ Goto(&done, top);
-
-      __ Bind(&call_runtime);
-      {
-        if (!allocate_operator_.is_set()) {
-          auto descriptor = AllocateDescriptor{};
-          auto call_descriptor = Linkage::GetStubCallDescriptor(
-              graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
-              CallDescriptor::kCanUseRoots, Operator::kNoThrow);
-          allocate_operator_.set(common()->Call(call_descriptor));
-        }
-        Node* vfalse = __ BitcastTaggedToWord(
-            __ Call(allocate_operator_.get(), allocate_builtin, size));
-        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
-        __ Goto(&done, vfalse);
-      }
-
-      __ Bind(&done);
-
-      // Compute the new top and write it back.
-      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
-      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
-                                   kNoWriteBarrier),
-               top_address, __ IntPtrConstant(0), top);
-
-      // Compute the initial object address.
-      value = __ BitcastWordToTagged(
-          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
-
-      // Start a new allocation group.
-      AllocationGroup* group =
-          new (zone()) AllocationGroup(value, allocation_type, size, zone());
-      state = AllocationState::Open(group, object_size, top, zone());
-    }
-  } else {
-    auto call_runtime = __ MakeDeferredLabel();
-    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
-
-    // Load allocation top and limit.
-    Node* top =
-        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
-    Node* limit =
-        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
-
-    // Compute the new top.
-    Node* new_top = __ IntAdd(top, size);
-
-    // Check if we can do bump pointer allocation here.
-    Node* check = __ UintLessThan(new_top, limit);
-    __ GotoIfNot(check, &call_runtime);
-    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
-      __ GotoIfNot(
-          __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
-          &call_runtime);
-    }
-    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
-                                 kNoWriteBarrier),
-             top_address, __ IntPtrConstant(0), new_top);
-    __ Goto(&done, __ BitcastWordToTagged(
-                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
-
-    __ Bind(&call_runtime);
-    if (!allocate_operator_.is_set()) {
-      auto descriptor = AllocateDescriptor{};
-      auto call_descriptor = Linkage::GetStubCallDescriptor(
-          graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
-          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
-      allocate_operator_.set(common()->Call(call_descriptor));
-    }
-    __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
-
-    __ Bind(&done);
-    value = done.PhiAt(0);
-
-    // Create an unfoldable allocation group.
-    AllocationGroup* group =
-        new (zone()) AllocationGroup(value, allocation_type, zone());
-    state = AllocationState::Closed(group, zone());
-  }
-
-  effect = __ ExtractCurrentEffect();
-  control = __ ExtractCurrentControl();
-
-  // Replace all effect uses of {node} with the {effect}, enqueue the
-  // effect uses for further processing, and replace all value uses of
-  // {node} with the {value}.
-  for (Edge edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      EnqueueUse(edge.from(), edge.index(), state);
-      edge.UpdateTo(effect);
-    } else if (NodeProperties::IsValueEdge(edge)) {
-      edge.UpdateTo(value);
-    } else {
-      DCHECK(NodeProperties::IsControlEdge(edge));
-      edge.UpdateTo(control);
-    }
-  }
-
-  // Kill the {node} to make sure we don't leave dangling dead uses.
-  node->Kill();
+  memory_lowering()->ReduceAllocateRaw(
+      node, allocation_type, allocation.allow_large_objects(), &state);
+  EnqueueUses(state->effect(), state);
 }
 
 void MemoryOptimizer::VisitLoadFromObject(Node* node,
                                           AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
-  ObjectAccess const& access = ObjectAccessOf(node->op());
-  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  memory_lowering()->ReduceLoadFromObject(node);
   EnqueueUses(node, state);
 }
 
 void MemoryOptimizer::VisitStoreToObject(Node* node,
                                          AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
-  ObjectAccess const& access = ObjectAccessOf(node->op());
-  Node* object = node->InputAt(0);
-  Node* value = node->InputAt(2);
-  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
-      node, object, value, state, access.write_barrier_kind);
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(), write_barrier_kind)));
-  EnqueueUses(node, state);
-}
-
-#undef __
-
-void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
-  DCHECK_EQ(IrOpcode::kCall, node->opcode());
-  // If the call can allocate, we start with a fresh state.
-  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
-    state = empty_state();
-  }
+  memory_lowering()->ReduceStoreToObject(node, state);
   EnqueueUses(node, state);
 }
 
 void MemoryOptimizer::VisitLoadElement(Node* node,
                                        AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
-  ElementAccess const& access = ElementAccessOf(node->op());
-  Node* index = node->InputAt(1);
-  node->ReplaceInput(1, ComputeIndex(access, index));
-  MachineType type = access.machine_type;
-  if (NeedsPoisoning(access.load_sensitivity)) {
-    NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
-  } else {
-    NodeProperties::ChangeOp(node, machine()->Load(type));
-  }
+  memory_lowering()->ReduceLoadElement(node);
  EnqueueUses(node, state);
 }
 
 void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
-  FieldAccess const& access = FieldAccessOf(node->op());
-  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
-  node->InsertInput(graph()->zone(), 1, offset);
-  MachineType type = access.machine_type;
-  if (NeedsPoisoning(access.load_sensitivity)) {
-    NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
-  } else {
-    NodeProperties::ChangeOp(node, machine()->Load(type));
-  }
+  memory_lowering()->ReduceLoadField(node);
   EnqueueUses(node, state);
 }
 
 void MemoryOptimizer::VisitStoreElement(Node* node,
                                         AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
-  ElementAccess const& access = ElementAccessOf(node->op());
-  Node* object = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
-      node, object, value, state, access.write_barrier_kind);
-  node->ReplaceInput(1, ComputeIndex(access, index));
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(), write_barrier_kind)));
+  memory_lowering()->ReduceStoreElement(node, state);
   EnqueueUses(node, state);
 }
 
 void MemoryOptimizer::VisitStoreField(Node* node,
                                       AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
-  FieldAccess const& access = FieldAccessOf(node->op());
-  Node* object = node->InputAt(0);
-  Node* value = node->InputAt(1);
-  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
-      node, object, value, state, access.write_barrier_kind);
-  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
-  node->InsertInput(graph()->zone(), 1, offset);
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(), write_barrier_kind)));
+  memory_lowering()->ReduceStoreField(node, state);
   EnqueueUses(node, state);
 }
-
 void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kStore, node->opcode());
-  StoreRepresentation representation = StoreRepresentationOf(node->op());
-  Node* object = node->InputAt(0);
-  Node* value = node->InputAt(2);
-  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
-      node, object, value, state, representation.write_barrier_kind());
-  if (write_barrier_kind != representation.write_barrier_kind()) {
-    NodeProperties::ChangeOp(
-        node, machine()->Store(StoreRepresentation(
-                  representation.representation(), write_barrier_kind)));
+  memory_lowering()->ReduceStore(node, state);
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kCall, node->opcode());
+  // If the call can allocate, we start with a fresh state.
+  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+    state = empty_state();
   }
   EnqueueUses(node, state);
 }
@@ -641,109 +375,12 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
   EnqueueUses(node, state);
 }
 
-Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
-  int const element_size_shift =
-      ElementSizeLog2Of(access.machine_type.representation());
-  if (element_size_shift) {
-    index = graph()->NewNode(machine()->WordShl(), index,
-                             jsgraph()->IntPtrConstant(element_size_shift));
-  }
-  int const fixed_offset = access.header_size - access.tag();
-  if (fixed_offset) {
-    index = graph()->NewNode(machine()->IntAdd(), index,
-                             jsgraph()->IntPtrConstant(fixed_offset));
-  }
-  return index;
-}
-
-namespace {
-
-bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
-  while (true) {
-    switch (value->opcode()) {
-      case IrOpcode::kBitcastWordToTaggedSigned:
-      case IrOpcode::kChangeTaggedSignedToCompressedSigned:
-      case IrOpcode::kChangeTaggedToCompressedSigned:
-        return false;
-      case IrOpcode::kChangeTaggedPointerToCompressedPointer:
-      case IrOpcode::kChangeTaggedToCompressed:
-        value = NodeProperties::GetValueInput(value, 0);
-        continue;
-      case IrOpcode::kHeapConstant: {
-        RootIndex root_index;
-        if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
-                                                &root_index) &&
-            RootsTable::IsImmortalImmovable(root_index)) {
-          return false;
-        }
-        break;
-      }
-      default:
-        break;
-    }
-    return true;
-  }
-}
-
-void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
-                              Zone* temp_zone) {
-  std::stringstream str;
-  str << "MemoryOptimizer could not remove write barrier for node #"
-      << node->id() << "\n";
-  str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
-      << node->id() << " to break in CSA code.\n";
-  Node* object_position = object;
-  if (object_position->opcode() == IrOpcode::kPhi) {
-    object_position = EffectPhiForPhi(object_position);
-  }
-  Node* allocating_node = nullptr;
-  if (object_position && object_position->op()->EffectOutputCount() > 0) {
-    allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
-  }
-  if (allocating_node) {
-    str << "\n  There is a potentially allocating node in between:\n";
-    str << "    " << *allocating_node << "\n";
-    str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
-        << allocating_node->id() << " to break there.\n";
-    if (allocating_node->opcode() == IrOpcode::kCall) {
-      str << "  If this is a never-allocating runtime call, you can add an "
-             "exception to Runtime::MayAllocate.\n";
-    }
-  } else {
-    str << "\n  It seems the store happened to something different than a "
-           "direct "
-           "allocation:\n";
-    str << "    " << *object << "\n";
-    str << "  Run mksnapshot with --csa-trap-on-node=" << name << ","
-        << object->id() << " to break there.\n";
-  }
-  FATAL("%s", str.str().c_str());
-}
-
-}  // namespace
-
-WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
-    Node* node, Node* object, Node* value, AllocationState const* state,
-    WriteBarrierKind write_barrier_kind) {
-  if (state->IsYoungGenerationAllocation() &&
-      state->group()->Contains(object)) {
-    write_barrier_kind = kNoWriteBarrier;
-  }
-  if (!ValueNeedsWriteBarrier(value, isolate())) {
-    write_barrier_kind = kNoWriteBarrier;
-  }
-  if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
-    WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
-  }
-  return write_barrier_kind;
-}
-
 MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
     AllocationStates const& states) {
   // Check if all states are the same; or at least if all allocation
   // states belong to the same allocation group.
   AllocationState const* state = states.front();
-  AllocationGroup* group = state->group();
+  MemoryLowering::AllocationGroup* group = state->group();
   for (size_t i = 1; i < states.size(); ++i) {
     if (states[i] != state) state = nullptr;
     if (states[i]->group() != group) group = nullptr;
@@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
     // TODO(bmeurer): We could potentially just create a Phi here to merge
     // the various tops; but we need to pay special attention not to create
     // an unschedulable graph.
-    state = AllocationState::Closed(group, zone());
+    state = AllocationState::Closed(group, nullptr, zone());
   } else {
     // The states are from different allocation groups.
     state = empty_state();
@@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index,
 
 Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
 
-Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
-
-CommonOperatorBuilder* MemoryOptimizer::common() const {
-  return jsgraph()->common();
-}
-
-MachineOperatorBuilder* MemoryOptimizer::machine() const {
-  return jsgraph()->machine();
-}
-
-bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
-  // Safe loads do not need poisoning.
-  if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
-  switch (poisoning_level_) {
-    case PoisoningMitigationLevel::kDontPoison:
-      return false;
-    case PoisoningMitigationLevel::kPoisonAll:
-      return true;
-    case PoisoningMitigationLevel::kPoisonCriticalOnly:
-      return load_sensitivity == LoadSensitivity::kCritical;
-  }
-  UNREACHABLE();
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
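
For orientation: the structural effect of the patch is that MemoryOptimizer keeps only the effect-chain walk (Optimize, VisitNode, EnqueueUses) and asks a separately owned MemoryLowering object to perform every actual rewrite (ReduceAllocateRaw, ReduceLoadField, ReduceStoreField, and so on), handing it the WriteBarrierAssertFailed callback at construction time. The following is a minimal, self-contained sketch of that driver/lowering split; it uses invented toy names rather than the real V8 types, so treat it as an illustration of the pattern, not an excerpt of the patched code.

// Toy analogy only -- not V8 API. It mirrors the split visible in the diff:
// a driver that walks nodes (MemoryOptimizer) and a lowering object that
// owns the rewrite rules (MemoryLowering). All names below are made up.
#include <deque>
#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string op;  // stand-in for an IrOpcode
};

// Owns the rewrite rules, like MemoryLowering in the patch.
class Lowering {
 public:
  void ReduceAllocate(Node* n) { n->op = "lowered-allocate"; }
  void ReduceStore(Node* n) { n->op = "lowered-store"; }
};

// Walks the nodes and dispatches, like MemoryOptimizer after the patch.
class Optimizer {
 public:
  explicit Optimizer(const std::vector<Node*>& nodes)
      : queue_(nodes.begin(), nodes.end()) {}

  void Optimize() {
    while (!queue_.empty()) {
      Node* n = queue_.front();
      queue_.pop_front();
      // The driver no longer knows how to lower anything itself; it only
      // decides which reduction to ask for.
      if (n->op == "allocate") {
        lowering_.ReduceAllocate(n);
      } else if (n->op == "store") {
        lowering_.ReduceStore(n);
      }
      // Other opcodes (e.g. calls) are left untouched.
    }
  }

 private:
  std::deque<Node*> queue_;
  Lowering lowering_;  // the driver owns its lowering, as memory_lowering_ does above
};

int main() {
  Node a{"allocate"}, s{"store"}, c{"call"};
  Optimizer opt({&a, &s, &c});
  opt.Optimize();
  std::cout << a.op << " " << s.op << " " << c.op << "\n";
  // prints: lowered-allocate lowered-store call
}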