Diffstat (limited to 'deps/v8/src/compiler/memory-optimizer.cc')
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc   56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 3ba3dcc6b8..298a503771 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -67,7 +67,7 @@ MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
- int size, Node* top)
+ intptr_t size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
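The hunk above widens the tracked allocation size from int to intptr_t so it has the same width as the bump-pointer top it gets added to on 64-bit targets. A minimal standalone sketch of that idea (hypothetical AllocationStateSketch type, not V8 code):

#include <cstdint>
#include <cstdio>
#include <limits>

// Standalone sketch: keep the reserved size as intptr_t so adding it to a
// pointer-width allocation top never truncates on 64-bit platforms.
struct AllocationStateSketch {
  intptr_t size;   // bytes reserved so far, pointer-width
  uintptr_t top;   // current allocation top

  // "Empty" state: size is effectively infinite, so nothing folds into it,
  // mirroring the std::numeric_limits<int>::max() initializer in the diff.
  static AllocationStateSketch Empty() {
    return {std::numeric_limits<intptr_t>::max(), 0};
  }
};

static_assert(sizeof(intptr_t) == sizeof(void*),
              "intptr_t matches pointer width; a 32-bit int does not on x64");

int main() {
  AllocationStateSketch s = AllocationStateSketch::Empty();
  std::printf("sizeof(size) = %zu, sizeof(void*) = %zu\n",
              sizeof(s.size), sizeof(void*));
}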
@@ -175,27 +175,35 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
- Int32Matcher m(size);
- if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
- int32_t const object_size = m.Value();
+ IntPtrMatcher m(size);
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
+ intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
- int32_t const state_size = state->size() + object_size;
+ intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
- if (OpParameter<int32_t>(group->size()->op()) < state_size) {
- NodeProperties::ChangeOp(group->size(),
- common()->Int32Constant(state_size));
+ if (machine()->Is64()) {
+ if (OpParameter<int64_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int64Constant(state_size));
+ }
+ } else {
+ if (OpParameter<int32_t>(group->size()->op()) < state_size) {
+ NodeProperties::ChangeOp(
+ group->size(),
+ common()->Int32Constant(static_cast<int32_t>(state_size)));
+ }
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+ Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
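The folding path above keeps a running upper bound for the group's reservation: each folded object adds its size to the state, the group's size constant is only patched upward when the new bound exceeds the old one, and the whole group stays under kMaxRegularHeapObjectSize. A standalone sketch of that bookkeeping, using a hypothetical FoldState struct and cap rather than V8's AllocationState/AllocationGroup:

#include <cstdint>
#include <cstdio>

// Hypothetical cap, standing in for V8's kMaxRegularHeapObjectSize.
constexpr intptr_t kMaxFoldedSize = 512 * 1024;

struct FoldState {
  intptr_t reserved = 0;  // upper bound reserved for the current group
};

// Try to fold an allocation of object_size bytes into the current group.
// On success the caller would just bump the allocation top instead of
// emitting a fresh reservation check.
bool TryFold(FoldState* state, intptr_t object_size) {
  if (object_size < 0 || object_size > kMaxFoldedSize) return false;
  // Overflow-safe form of: reserved + object_size <= kMaxFoldedSize,
  // matching the "state->size() <= kMax... - object_size" check in the diff.
  if (state->reserved > kMaxFoldedSize - object_size) return false;
  state->reserved += object_size;  // patch the reservation upward
  return true;
}

int main() {
  FoldState state;
  std::printf("fold 32 bytes: %d\n", TryFold(&state, 32));          // fits
  std::printf("fold max size: %d\n", TryFold(&state, kMaxFoldedSize));  // no longer fits
}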
@@ -213,7 +221,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
- Node* size = __ UniqueInt32Constant(object_size);
+ Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
@@ -223,10 +231,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
- Node* check = __ UintLessThan(
- __ IntAdd(top,
- machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
- limit);
+ Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
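With size already pointer-width, the reservation check above collapses to the plain bump-pointer pattern: add size to top and fall back to the runtime if the result would cross limit. A minimal sketch of that check with ordinary pointers instead of graph nodes (assumed layout, not V8's generated code):

#include <cstdint>
#include <cstdio>

// Returns the old top on success, or nullptr when the runtime/GC path
// would be taken.
uint8_t* BumpAllocate(uint8_t** top, uint8_t* limit, uintptr_t size) {
  uint8_t* new_top = *top + size;          // IntAdd(top, size) in the diff
  if (!(new_top < limit)) return nullptr;  // UintLessThan(new_top, limit) failed
  uint8_t* result = *top;
  *top = new_top;                          // store back the new allocation top
  return result;
}

int main() {
  uint8_t space[64];
  uint8_t* top = space;
  uint8_t* limit = space + sizeof(space);
  std::printf("16 bytes: %p\n", static_cast<void*>(BumpAllocate(&top, limit, 16)));
  std::printf("128 bytes: %p\n", static_cast<void*>(BumpAllocate(&top, limit, 128)));
}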
@@ -238,8 +243,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), AllocateDescriptor{}, 0,
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
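The call-descriptor change above replaces a hard-coded stack parameter count of 0 with whatever the allocate descriptor itself reports, so the call site stays in sync if the stub's signature ever grows stack parameters. As a generic illustration of that pattern only (hypothetical StubDescriptor/MakeCallDescriptor types, not V8's Linkage API):

#include <cstdio>

// Hypothetical descriptor standing in for AllocateDescriptor.
struct StubDescriptor {
  int GetStackParameterCount() const { return 0; }  // may change over time
};

// Hypothetical result standing in for a CallDescriptor.
struct CallDescriptorSketch {
  int stack_parameter_count;
};

CallDescriptorSketch MakeCallDescriptor(const StubDescriptor& descriptor) {
  // Query the descriptor instead of hard-coding 0, so a later change to the
  // stub's parameter list is picked up automatically.
  return {descriptor.GetStackParameterCount()};
}

int main() {
  StubDescriptor descriptor;
  CallDescriptorSketch cd = MakeCallDescriptor(descriptor);
  std::printf("stack params: %d\n", cd.stack_parameter_count);
}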
@@ -276,8 +282,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
- Node* new_top =
- __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
+ Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
@@ -294,8 +299,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
+ auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), AllocateDescriptor{}, 0,
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
@@ -421,19 +427,7 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
-Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
- Node* index;
- if (machine()->Is64()) {
- // On 64-bit platforms, we need to feed a Word64 index to the Load and
- // Store operators. Since LoadElement or StoreElement don't do any bounds
- // checking themselves, we can be sure that the {key} was already checked
- // and is in valid range, so we can do the further address computation on
- // Word64 below, which ideally allows us to fuse the address computation
- // with the actual memory access operation on Intel platforms.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
- } else {
- index = key;
- }
+Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {