Diffstat (limited to 'deps/v8/src/builtins/ia32/builtins-ia32.cc')
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 107
1 file changed, 64 insertions(+), 43 deletions(-)
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 368e6670c1..3319dd0c51 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -101,7 +101,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ push(edi);
__ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
+ __ mov(ecx, eax);
// Restore target function and new target.
__ pop(edx);
@@ -110,15 +110,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(eax);
}
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
namespace {
@@ -224,7 +218,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -345,7 +339,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -590,6 +584,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -748,10 +743,12 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
- __ add(optimized_code_entry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ Move(ecx, optimized_code_entry);
+ __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(edx);
__ pop(eax);
- __ jmp(optimized_code_entry);
+ __ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -767,10 +764,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -780,11 +780,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +792,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +801,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
@@ -828,7 +837,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -931,9 +940,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -946,16 +956,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1262,9 +1269,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1280,8 +1288,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1289,6 +1299,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1306,7 +1320,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1319,7 +1333,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1348,7 +1366,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1531,6 +1549,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has be reset to the compile lazy builtin.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -2093,7 +2112,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2566,6 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -2581,6 +2601,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);