Diffstat (limited to 'deps/v8/src/deoptimizer')
-rw-r--r--  deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc      21
-rw-r--r--  deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc  18
-rw-r--r--  deps/v8/src/deoptimizer/deoptimize-reason.h           1
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.cc               23
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.h                12
-rw-r--r--  deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc       21
-rw-r--r--  deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc     46
-rw-r--r--  deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc        2
8 files changed, 117 insertions(+), 27 deletions(-)
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 2befb70264..45ff06eb70 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -123,6 +123,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ vstr(d0, r1, dst_offset);
}
+ // Mark the stack as not iterable for the CPU profiler, which won't be
+ // able to walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(zero, Operand(0));
+ __ strb(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize));
@@ -209,6 +220,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
// Remove sp, lr and pc.
__ Drop(3);
{
@@ -218,6 +238,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(lr);
__ Jump(scratch);
}
+
__ stop();
}
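
Note: the hunks above clear a per-isolate byte at ExternalReference::stack_is_iterable_address() while the deoptimizer has the frame torn down (the return address is already popped), and set it back to 1 once registers are restored. A minimal sketch of the consumer side; the function and its shape are assumptions for illustration, only stack_is_iterable_address() comes from the diff:

    #include <cstdint>

    // Hypothetical sampler-side check: a profiler consults the flag before
    // attempting a stack walk and drops the sample if the deoptimizer has
    // the stack in a non-walkable state.
    bool TrySampleStack(Isolate* isolate) {
      uint8_t iterable = *reinterpret_cast<uint8_t*>(
          ExternalReference::stack_is_iterable_address(isolate).address());
      if (!iterable) return false;  // return address is gone; skip sample
      // ... iterate frames as usual ...
      return true;
    }
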
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 82ae764e50..17091259d6 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -189,6 +189,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
+ // Mark the stack as not iterable for the CPU profiler, which won't be
+ // able to walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ strb(xzr, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(kSavedRegistersAreaSize / kXRegSize);
@@ -251,6 +260,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
RestoreRegList(masm, saved_double_registers, x1,
FrameDescription::double_registers_offset());
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ Register one = x4;
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ Mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
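
Note: each new block above takes a UseScratchRegisterScope so the temporary register is handed back when the block ends. A hypothetical miniature of that RAII pattern (not V8's implementation; __builtin_ctz is a GCC/Clang builtin):

    #include <cassert>
    #include <cstdint>

    class ScratchScope {
     public:
      explicit ScratchScope(uint32_t* available) : available_(available) {}
      ~ScratchScope() { *available_ |= acquired_; }  // release on scope exit

      int Acquire() {
        assert(*available_ != 0 && "no scratch register is free");
        int code = __builtin_ctz(*available_);  // lowest free register code
        uint32_t bit = uint32_t{1} << code;
        *available_ &= ~bit;  // mark busy, so nested scopes skip it
        acquired_ |= bit;     // remember to release it
        return code;
      }

     private:
      uint32_t* available_;
      uint32_t acquired_ = 0;
    };

Nested scopes compose because each destructor returns only the registers that scope itself acquired.
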
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index ac2273460a..71eaa8b626 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -48,7 +48,6 @@ namespace internal {
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
V(Overflow, "overflow") \
- V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \
V(Smi, "Smi") \
V(Unknown, "(unknown)") \
V(ValueMismatch, "value mismatch") \
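
Note: the V(...) rows above belong to an X-macro list, so deleting the ReceiverNotAGlobalProxy row removes both the enum value and its message everywhere the list is expanded. A condensed sketch of the pattern; the Demo* names are illustrative, not the real identifiers:

    #include <cstdint>

    #define DEMO_DEOPTIMIZE_REASON_LIST(V) \
      V(OutOfBounds, "out of bounds")      \
      V(Overflow, "overflow")              \
      V(Smi, "Smi")

    // One expansion produces the enum...
    enum class DemoDeoptimizeReason : uint8_t {
    #define DEMO_REASON(Name, message) k##Name,
      DEMO_DEOPTIMIZE_REASON_LIST(DEMO_REASON)
    #undef DEMO_REASON
    };

    // ...and another the human-readable messages.
    const char* DemoDeoptimizeReasonToString(DemoDeoptimizeReason reason) {
      switch (reason) {
    #define DEMO_REASON(Name, message) \
      case DemoDeoptimizeReason::k##Name: return message;
        DEMO_DEOPTIMIZE_REASON_LIST(DEMO_REASON)
    #undef DEMO_REASON
      }
      return "(unknown)";
    }
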
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 64551c6899..fcb4c27d0b 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -357,6 +357,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
for (Code code : codes) {
isolate->heap()->InvalidateCodeDeoptimizationData(code);
}
+
+ native_context.GetOSROptimizedCodeCache().EvictMarkedCode(
+ native_context.GetIsolate());
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@@ -375,6 +378,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
while (!context.IsUndefined(isolate)) {
NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
+ OSROptimizedCodeCache::Clear(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
@@ -432,6 +436,13 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
code.set_deopt_already_counted(true);
}
DeoptimizeMarkedCodeForContext(function.context().native_context());
+ // TODO(mythria): Ideally EvictMarkedCode should compact the cache without
+ // having to explicitly call this. We don't do this currently because
+ // compacting causes GC and DeoptimizeMarkedCodeForContext uses raw
+ // pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
+ // this call from here.
+ OSROptimizedCodeCache::Compact(
+ Handle<NativeContext>(function.context().native_context(), isolate));
}
}
@@ -3640,8 +3651,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
// Set markers for the double properties.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int field_count = map->NumberOfOwnDescriptors();
- for (int i = 0; i < field_count; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (descriptors->GetDetails(i).representation().IsDouble() &&
!index.is_inobject()) {
@@ -3673,10 +3683,9 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int field_count = map->NumberOfOwnDescriptors();
// Set markers for the double properties.
- for (int i = 0; i < field_count; i++) {
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (descriptors->GetDetails(i).representation().IsDouble() &&
index.is_inobject()) {
@@ -3712,8 +3721,7 @@ void TranslatedState::InitializeJSObjectAt(
CHECK_GE(slot->GetChildrenCount(), 2);
// Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Fill the property array field.
{
@@ -3772,8 +3780,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
}
// Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(
- *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation);
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation);
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {
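
Note: besides wiring the OSROptimizedCodeCache into deoptimization (EvictMarkedCode, Clear, Compact), the deoptimizer.cc hunks replace raw int loops over NumberOfOwnDescriptors() with range-for over map->IterateOwnDescriptors(), which yields a typed InternalIndex. A hypothetical miniature of such a self-iterating index type (not V8's actual definition):

    #include <cstddef>

    // A strongly typed index that acts as its own iterator, so descriptor
    // numbers cannot be mixed up with arbitrary ints.
    class TypedIndex {
     public:
      explicit constexpr TypedIndex(size_t raw) : raw_(raw) {}
      constexpr size_t raw_value() const { return raw_; }

      // Iterator protocol for range-for.
      TypedIndex& operator++() { ++raw_; return *this; }
      constexpr bool operator!=(TypedIndex other) const {
        return raw_ != other.raw_;
      }
      constexpr TypedIndex operator*() const { return *this; }

      class Range {
       public:
        explicit constexpr Range(size_t size) : size_(size) {}
        constexpr TypedIndex begin() const { return TypedIndex(0); }
        constexpr TypedIndex end() const { return TypedIndex(size_); }
       private:
        size_t size_;
      };

     private:
      size_t raw_;
    };

    // Usage mirrors the new loops:
    //   for (TypedIndex i : TypedIndex::Range(count)) { ... i.raw_value() ... }
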
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 6d0a350aac..beb2a9aa50 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -488,14 +488,14 @@ class Deoptimizer : public Malloced {
DeoptimizeKind* type);
// Code generation support.
- static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
+ static int input_offset() { return offsetof(Deoptimizer, input_); }
static int output_count_offset() {
- return OFFSET_OF(Deoptimizer, output_count_);
+ return offsetof(Deoptimizer, output_count_);
}
- static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int output_offset() { return offsetof(Deoptimizer, output_); }
static int caller_frame_top_offset() {
- return OFFSET_OF(Deoptimizer, caller_frame_top_);
+ return offsetof(Deoptimizer, caller_frame_top_);
}
V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -731,11 +731,11 @@ class FrameDescription {
int parameter_count() { return parameter_count_; }
static int registers_offset() {
- return OFFSET_OF(FrameDescription, register_values_.registers_);
+ return offsetof(FrameDescription, register_values_.registers_);
}
static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, register_values_.double_registers_);
+ return offsetof(FrameDescription, register_values_.double_registers_);
}
static int frame_size_offset() {
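
Note: the header swaps V8's local OFFSET_OF macro for the standard offsetof from <cstddef>; the local macro likely existed to sidestep older-compiler complaints about offsetof on non-standard-layout types. A minimal sketch over a hypothetical struct of what these accessors compute (codegen then addresses the fields by raw byte offset):

    #include <cstddef>
    #include <cstdint>

    struct FrameDescriptionSketch {  // hypothetical layout, not V8's
      uintptr_t frame_size_;
      intptr_t registers_[16];
      double double_registers_[16];
    };

    // offsetof yields the byte offset of a member within the struct.
    static constexpr int kRegistersOffsetDemo =
        static_cast<int>(offsetof(FrameDescriptionSketch, registers_));
    static_assert(kRegistersOffsetDemo == sizeof(uintptr_t),
                  "registers_ sits right after frame_size_");
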
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 864e9dbe36..4036b73443 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -113,6 +113,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ stfd(d0, MemOperand(r4, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler, which won't be
+ // able to walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(zero, Operand(0));
+ __ stb(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ addi(sp, sp, Operand(kSavedRegistersAreaSize));
@@ -210,12 +221,22 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
{
UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(one, Operand(1));
+ __ stb(one, MemOperand(is_iterable));
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
__ Jump(scratch);
}
+
__ stop();
}
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 616a57ba0e..7ea6e56b8c 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -40,7 +40,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Push all GPRs onto the stack
- __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
+ __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(r1, Operand(ExternalReference::Create(
@@ -48,7 +48,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(fp, MemOperand(r1));
const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed using r10
__ LoadRR(r4, r10);
@@ -79,7 +79,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
- __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
@@ -94,13 +94,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
- // MemOperand(sp), kNumberOfRegisters * kPointerSize);
+ // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r4, MemOperand(sp, i * kPointerSize));
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
@@ -110,12 +111,24 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler, which won't be
+ // able to walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(zero, Operand(0));
+ __ StoreByte(zero, MemOperand(is_iterable));
+ }
+
// Remove the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
@@ -134,7 +147,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
- __ la(r5, MemOperand(r5, kPointerSize));
+ __ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
@@ -158,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
- __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
@@ -178,7 +191,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
- __ AddP(r6, r6, Operand(kPointerSize));
+ __ AddP(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
@@ -200,15 +213,26 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(one, Operand(1));
+ __ StoreByte(one, MemOperand(is_iterable));
+ }
+
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
+
__ stop();
}
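
Note: most of the s390 hunk is the mechanical rename kPointerSize -> kSystemPointerSize. The explicit name matters because, with pointer compression, a tagged on-heap slot is narrower than a machine word. Illustrative constants (not V8's definitions):

    // Demo constants: a "system pointer" is always a machine word, while a
    // tagged slot shrinks to 32 bits when pointer compression is on.
    constexpr int kSystemPointerSizeDemo = sizeof(void*);  // 8 on s390x
    #ifdef V8_COMPRESS_POINTERS
    constexpr int kTaggedSizeDemo = 4;                      // compressed slot
    #else
    constexpr int kTaggedSizeDemo = kSystemPointerSizeDemo;
    #endif
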
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 29c81f195c..03d7c759c0 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -81,7 +81,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
__ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
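
Note: _WIN64 is defined by the host compiler, so it can pick the wrong calling convention when cross-compiling (for example, generating Windows-target code on a Linux host); V8_TARGET_OS_WIN describes the target instead. A sketch of why the distinction matters here (the ABI facts are standard; the comments are illustrative):

    // The argument-passing choice must follow the *target* ABI:
    #ifdef V8_TARGET_OS_WIN
      // Windows x64: four register args (rcx, rdx, r8, r9); the fifth and
      // sixth C-function arguments go on the stack, hence the movq spills.
    #else
      // System V AMD64: six register args (rdi, rsi, rdx, rcx, r8, r9), so
      // arg5/arg6 can stay in registers.
    #endif
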