Diffstat (limited to 'deps/v8/src/builtins/ppc/builtins-ppc.cc')
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc  210
1 file changed, 102 insertions(+), 108 deletions(-)
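For orientation, this change splits the old MaybeTailCallOptimizedCodeSlot helper in two: MaybeOptimizeCode handles a Smi optimization marker, and TailCallOptimizedCodeSlot handles a weak reference to optimized code, with Generate_InterpreterEntryTrampoline now reading the feedback-vector slot itself and routing between them. The plain C++ sketch below models that routing only; the types and names (Marker, Code, Slot, Dispatch) are illustrative stand-ins, not V8 APIs.

#include <variant>

// Hypothetical stand-ins for the contents of
// FeedbackVector::kOptimizedCodeWeakOrSmiOffset: a Smi marker or a weak code ref.
enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                    kCompileOptimizedConcurrent, kInOptimizationQueue };
struct Code { bool marked_for_deoptimization; };
using Slot = std::variant<Marker, Code*>;  // Code* == nullptr models a cleared weak ref

enum class Action {
  kRunBytecode,           // fall through to the interpreter (not_optimized)
  kCallRuntimeForMarker,  // MaybeOptimizeCode: tail-call the matching runtime entry
                          // (kInOptimizationQueue falls back to bytecode)
  kEvictAndReenter,       // TailCallOptimizedCodeSlot: Runtime::kEvictOptimizedCodeSlot
  kTailCallOptimizedCode  // TailCallOptimizedCodeSlot: jump to the optimized code
};

Action Dispatch(const Slot& slot) {
  if (const Marker* marker = std::get_if<Marker>(&slot)) {
    if (*marker == Marker::kNone) return Action::kRunBytecode;  // slot is empty
    return Action::kCallRuntimeForMarker;
  }
  Code* code = std::get<Code*>(slot);
  if (code == nullptr) return Action::kRunBytecode;  // weak reference was cleared
  if (code->marked_for_deoptimization) return Action::kEvictAndReenter;
  return Action::kTailCallOptimizedCode;
}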
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 485b793395..ab0c7900d5 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -863,9 +863,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
@@ -902,100 +904,73 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r6 : new target (preserved for callee if needed, and caller)
// -- r4 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
Register closure = r4;
- Register optimized_code_entry = scratch1;
-
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ beq(&fallthrough);
-
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution);
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent);
- TailCallRuntimeIfMarkerEquals(
- masm, optimized_code_entry,
- OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent);
-
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ CmpSmiLiteral(
- optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
- }
- __ b(&fallthrough);
- }
- }
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ LoadWordArith(
+ scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code, cr0);
+
+ // Optimized code is good, get it into the closure and link the closure
+ // into the optimized functions list, then tail call the optimized code.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch, r8);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadCodeObjectEntry(r5, optimized_code_entry);
+ __ Jump(r5);
- {
- // Optimized code slot is a weak reference.
- __ bind(&optimized_code_slot_is_weak_ref);
-
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
-
- // Check if the optimized code is marked for deopt. If it is, call the
- // runtime to clear it.
- Label found_deoptimized_code;
- __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
- __ LoadWordArith(
- scratch2,
- FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code, cr0);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- // The feedback vector is no longer used, so re-use it as a scratch
- // register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadCodeObjectEntry(r5, optimized_code_entry);
- __ Jump(r5);
+ // Optimized code slot contains deoptimized code, evict it and re-enter
+ // the closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- r6 : new target (preserved for callee if needed, and caller)
+ // -- r4 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping
+ // that an interrupt will eventually update the slot with optimized code.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(optimization_marker,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
+ r0);
+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
}
-
- // Fall-through if the optimized code cell is clear and there is no
- // optimization marker.
- __ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1104,9 +1079,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+ Register optimized_code_entry = r7;
+
+ // Read off the optimized code slot in the feedback vector.
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ bne(&optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ LoadWord(
@@ -1149,29 +1135,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, r3);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size (word) from the BytecodeArray object.
__ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ sub(r8, sp, r5);
LoadRealStackLimit(masm, r0);
__ cmpl(r8, r0);
- __ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
- __ LoadRoot(r8, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
- __ push(r8);
+ __ push(kInterpreterAccumulatorRegister);
__ bdnz(&loop);
__ bind(&no_args);
}
@@ -1189,8 +1173,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StorePX(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
+
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
@@ -1231,8 +1215,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load the code entry from the weak reference. If it was cleared, resume
+ // execution of the unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
@@ -1596,14 +1598,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(r3);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2260,7 +2256,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r4 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
@@ -2277,12 +2273,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ cmpi(r8, Operand(JS_PROXY_TYPE));
- __ bne(&non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));