Diffstat (limited to 'deps/v8/src/arm64/macro-assembler-arm64.cc')
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 396
1 file changed, 265 insertions(+), 131 deletions(-)
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 97a75e5758..48cd13d5fc 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -10,39 +10,27 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
-#include "src/heap/heap-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/arm64/macro-assembler-arm64.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
CPURegList TurboAssembler::DefaultFPTmpList() {
@@ -53,13 +41,27 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
// We only allow one exclusion register, so if the list is of even length
// before exclusions, it must still be afterwards, to maintain alignment.
// Therefore, we can ignore the exclusion register in the computation.
// However, we leave it in the argument list to mirror the prototype for
// Push/PopCallerSaved().
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64,
+ // which leaves an odd number of caller-saved registers. padreg is counted
+ // here to keep the stack 16-byte aligned.
+ DCHECK_EQ(list.Count() % 2, 1);
+ if (exclusion.Is(no_reg)) {
+ bytes += kXRegSizeInBits / 8;
+ } else {
+ bytes -= kXRegSizeInBits / 8;
+ }
+#else
+ DCHECK_EQ(list.Count() % 2, 0);
USE(exclusion);
+#endif
+
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
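
The Windows ARM64 byte accounting above is easy to misread: the list starts at an odd count, so either a pad slot is added (no exclusion) or the excluded register's slot is dropped. A standalone model of the arithmetic, with hypothetical names, for illustration only:

    // Model of RequiredStackSizeForCallerSaved's Windows ARM64 accounting
    // (not V8 code); kXRegBytes mirrors kXRegSizeInBits / 8.
    constexpr int kXRegBytes = 8;

    constexpr int CallerSavedBytes(int reg_count, bool has_exclusion,
                                   bool is_win_arm64) {
      int bytes = reg_count * kXRegBytes;
      if (is_win_arm64) {
        // Odd count: pad when nothing is excluded, otherwise drop the slot.
        bytes += has_exclusion ? -kXRegBytes : kXRegBytes;
      }
      return bytes;
    }

    static_assert(CallerSavedBytes(17, false, true) % 16 == 0, "padded");
    static_assert(CallerSavedBytes(17, true, true) % 16 == 0, "excluded");
    static_assert(CallerSavedBytes(18, false, false) % 16 == 0, "even list");
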
@@ -73,12 +75,24 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
int bytes = 0;
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64, so
+ // the list has odd length; add padreg (or drop the exclusion) to keep the
+ // count even and the stack aligned.
+ if (!exclusion.Is(no_reg)) {
+ list.Remove(exclusion);
+ } else {
+ list.Combine(padreg);
+ }
+#else
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
+#endif
+
+ DCHECK_EQ(list.Count() % 2, 0);
PushCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
@@ -99,12 +113,24 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
}
auto list = kCallerSaved;
- DCHECK_EQ(list.Count() % 2, 0);
+
+#if defined(V8_OS_WIN)
+ // X18 is excluded from the caller-saved register list on Windows ARM64, so
+ // the list has odd length; add padreg (or drop the exclusion) to keep the
+ // count even and the stack aligned.
+ if (!exclusion.Is(no_reg)) {
+ list.Remove(exclusion);
+ } else {
+ list.Combine(padreg);
+ }
+#else
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
+#endif
+
+ DCHECK_EQ(list.Count() % 2, 0);
PopCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
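
Why the even-count invariant matters: AArch64 requires SP to stay 16-byte aligned, and registers are pushed in 8-byte pairs. A minimal standalone check (illustrative, not V8 code):

    // Each x-register occupies 8 bytes; an even count keeps SP 16-byte
    // aligned across Push/PopCallerSaved.
    constexpr bool SpStaysAligned(int reg_count) {
      return (reg_count * 8) % 16 == 0;
    }

    static_assert(SpStaysAligned(18), "even caller-saved list is fine");
    static_assert(!SpStaysAligned(17), "an odd list would misalign SP");
    static_assert(SpStaysAligned(17 + 1), "padreg restores alignment");
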
@@ -315,7 +341,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
return;
} else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> x(
- reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+ reinterpret_cast<Address*>(operand.ImmediateValue()));
IndirectLoadConstant(rd, x);
return;
}
@@ -360,6 +386,10 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
+void TurboAssembler::Mov(const Register& rd, Smi smi) {
+ return Mov(rd, Operand(smi));
+}
+
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
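
The new Mov overload above reflects this patch's migration of Smi from a pointer type (Smi*) to a value type, visible again in the Move(Register, Smi) hunk below. A standalone sketch of the arm64 Smi encoding implied by the STATIC_ASSERTs in CallBuiltinPointer further down (1 tag bit, 31 shift bits, payload in the upper 32 bits); these are illustrative helpers, not V8's:

    #include <cstdint>

    constexpr uint64_t SmiTagModel(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
    }
    constexpr int32_t SmiUntagModel(uint64_t tagged) {
      return static_cast<int32_t>(tagged >> 32);
    }

    static_assert(SmiUntagModel(SmiTagModel(42)) == 42, "round-trip");
    static_assert((SmiTagModel(42) & 1) == 0, "Smi tag bit is 0");
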
@@ -1519,7 +1549,8 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
- Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
+ Ldr(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
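
RootRegisterOffsetForRootIndex replaces the older RootRegisterOffset helper. A hypothetical model of the offset computation, assuming (not confirmed by this diff) that the roots table lives at a fixed offset inside IsolateData with one pointer-sized slot per RootIndex:

    #include <cstdint>

    // All names and the zero table offset are placeholders, not V8's values.
    constexpr intptr_t kRootsTableOffsetModel = 0;
    constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte slots on arm64

    constexpr intptr_t RootRegisterOffsetModel(int root_index) {
      return kRootsTableOffsetModel +
             (static_cast<intptr_t>(root_index) << kSystemPointerSizeLog2);
    }

    static_assert(RootRegisterOffsetModel(2) == 16, "third root, 8-byte slots");
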
@@ -1532,7 +1563,7 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
}
}
-void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
@@ -1632,6 +1663,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
B(eq, &do_check);
+ // Check if JSAsyncFunctionObject
+ Cmp(temp, JS_ASYNC_FUNCTION_OBJECT_TYPE);
+ B(eq, &do_check);
+
// Check if JSAsyncGeneratorObject
Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -1664,27 +1699,6 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
}
}
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start;
- Bind(&start);
-#endif
- Operand operand = Operand::EmbeddedCode(stub);
- near_call(operand.heap_object_request());
- DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
-}
-
-void MacroAssembler::CallStub(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
@@ -1695,8 +1709,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Mov(x0, f->nargs);
Mov(x1, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, x0, x1));
- Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
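
The removed Add computed the code entry point by hand; CallCodeObject (added later in this patch) folds that in and additionally handles off-heap builtins. The math the Add performed, as a standalone check (the header size is a placeholder; only kHeapObjectTag = 1 is assumed):

    #include <cstdint>

    // A tagged Code pointer is the object address plus the heap-object tag;
    // instructions begin kHeaderSize bytes into the object.
    constexpr intptr_t kHeapObjectTagModel = 1;
    constexpr intptr_t kCodeHeaderSizeModel = 64;  // placeholder

    constexpr intptr_t EntryFromTaggedCode(intptr_t tagged_code) {
      return tagged_code + kCodeHeaderSizeModel - kHeapObjectTagModel;
    }

    static_assert(EntryFromTaggedCode(0x1001) == 0x1000 + kCodeHeaderSizeModel,
                  "untag, then skip the Code header");
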
@@ -1792,10 +1805,38 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch1 = x4;
+ Register scratch2 = x5;
+ Push(scratch1, scratch2);
+
+ Label get_pc;
+ Bind(&get_pc);
+ Adr(scratch2, &get_pc);
+
+ Mov(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Str(scratch2, MemOperand(scratch1));
+ Mov(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(fp, MemOperand(scratch1));
+
+ Pop(scratch2, scratch1);
+ }
+
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = x4;
+ Push(scratch, xzr);
+ Mov(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(xzr, MemOperand(scratch));
+ Pop(xzr, scratch);
+ }
+
if (num_of_reg_args > kRegisterPassedArguments) {
// Drop the register passed arguments.
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
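
The protocol around the Call above: publish the caller's fp and pc so a sampling profiler can iterate the stack across the JS-to-C boundary, then zero the fp slot afterwards (the pc is deliberately left stale, as the comment notes). A standalone model of that protocol, not V8 code:

    #include <cstdint>

    struct FastCCallSlots {
      uintptr_t caller_fp = 0;
      uintptr_t caller_pc = 0;
    };

    template <typename Fn>
    void CallCWithIterableStack(FastCCallSlots& slots, uintptr_t fp,
                                uintptr_t pc, Fn fn) {
      slots.caller_pc = pc;
      slots.caller_fp = fp;  // A non-zero fp marks the fast call as active.
      fn();
      slots.caller_fp = 0;   // FP is the source of truth; pc stays stale.
    }
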
@@ -1805,8 +1846,7 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ldr(destination,
FieldMemOperand(destination,
@@ -1880,37 +1920,26 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference {code} above since code generation
- // for builtins and code stubs happens on the main thread.
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- IndirectLoadConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(scratch, cond);
- return;
- }
}
}
+
if (CanUseNearCallOrJump(rmode)) {
JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
} else {
@@ -1936,39 +1965,27 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
BlockPoolsScope scope(this);
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code &&
- !Builtins::IsIsolateIndependentBuiltin(*code)) {
- // Calls to embedded targets are initially generated as standard
- // pc-relative calls below. When creating the embedded blob, call offsets
- // are patched up to point directly to the off-heap instruction start.
- // Note: It is safe to dereference {code} above since code generation
- // for builtins and code stubs happens on the main thread.
+ if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- IndirectLoadConstant(scratch, code);
- Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(scratch);
- return;
- }
}
}
+
if (CanUseNearCallOrJump(rmode)) {
near_call(AddCodeTarget(code), rmode);
} else {
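
Both Jump and Call now share the same shape: an isolate-independent builtin gets its embedded-blob entry inlined, and everything else falls through to the near/far code-target path. The decision, restated as a standalone model (names hypothetical):

    enum class TargetKind { kInlinedOffHeapEntry, kCodeTarget };

    constexpr TargetKind ClassifyTarget(bool inline_offheap_trampolines,
                                        bool is_builtin,
                                        bool is_isolate_independent) {
      return (inline_offheap_trampolines && is_builtin &&
              is_isolate_independent)
                 ? TargetKind::kInlinedOffHeapEntry
                 : TargetKind::kCodeTarget;
    }

    static_assert(ClassifyTarget(true, true, true) ==
                      TargetKind::kInlinedOffHeapEntry,
                  "independent builtins are inlined");
    static_assert(ClassifyTarget(true, true, false) == TargetKind::kCodeTarget,
                  "isolate-dependent builtins keep the code-target path");
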
@@ -1983,6 +2000,108 @@ void TurboAssembler::Call(ExternalReference target) {
Call(temp);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
+ Add(builtin_pointer, builtin_pointer,
+ IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ Cmp(scratch, Operand(Builtins::kNoBuiltinId));
+ B(ne, &if_code_is_builtin);
+
+ // A non-builtin Code object: the entry point is at
+ // Code::raw_instruction_start().
+ Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ B(&out);
+
+ // A builtin Code object: the entry point is loaded from the builtin entry
+ // table, using the builtin index that is now in scratch.
+ bind(&if_code_is_builtin);
+ Lsl(destination, scratch, kSystemPointerSizeLog2);
+ Add(destination, destination, kRootRegister);
+ Ldr(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.AcquireX();
+
+ Label return_location;
+ Adr(scratch1, &return_location);
+ Poke(scratch1, 0);
+
+ if (emit_debug_code()) {
+ // Verify that the slot below the address stored at fp[kSPOffset], i.e. at
+ // fp[kSPOffset] - 8, holds the return location.
+ Register scratch2 = temps.AcquireX();
+ Ldr(scratch2, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Ldr(scratch2, MemOperand(scratch2, -static_cast<int64_t>(kXRegSize)));
+ Cmp(scratch2, scratch1);
+ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
+ }
+
+ Blr(target);
+ Bind(&return_location);
+}
+
void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
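
The single Asr in CallBuiltinPointer folds Smi untagging into table indexing: with the payload in the upper 32 bits (per the STATIC_ASSERTs), shifting right by kSmiShift - kSystemPointerSizeLog2 = 32 - 3 = 29 turns a tagged index directly into a byte offset of index * 8. A standalone check of that arithmetic:

    #include <cstdint>

    constexpr int64_t kSmiShiftModel = 32;
    constexpr int64_t kPointerSizeLog2Model = 3;

    constexpr int64_t SmiIndexToTableOffset(int64_t builtin_index) {
      int64_t smi = builtin_index << kSmiShiftModel;  // tag the index
      return smi >> (kSmiShiftModel - kPointerSizeLog2Model);  // untag + scale
    }

    static_assert(SmiIndexToTableOffset(0) == 0, "first builtin");
    static_assert(SmiIndexToTableOffset(5) == 5 * 8, "8-byte table entries");
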
@@ -1994,31 +2113,24 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
-
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
BlockPoolsScope scope(this);
+ NoRootArrayScope no_root_array(this);
+
#ifdef DEBUG
Label start;
Bind(&start);
#endif
- // The deoptimizer requires the deoptimization id to be in x16.
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- DCHECK(temp.Is(x16));
// Make sure that the deopt id fits in 16 bits, so that it can be encoded
// in a single movz instruction with a zero shift.
DCHECK(is_uint16(deopt_id));
- movz(temp, deopt_id);
+ movz(x26, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
-
- DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
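
CallForDeoptimization relies on two encodability guarantees, both asserted above: the deopt id fits the 16-bit immediate of one movz, and the displacement to the deopt entry (counted in 4-byte instructions) fits bl's signed 26-bit field, i.e. roughly +/-128MB. Standalone versions of those checks:

    #include <cstdint>

    constexpr int kInstrSizeModel = 4;

    constexpr bool FitsMovzImm16(uint64_t deopt_id) {
      return deopt_id < (uint64_t{1} << 16);
    }

    constexpr bool IsNearCallOffsetModel(int64_t target, int64_t range_start) {
      int64_t offset = (target - range_start) / kInstrSizeModel;
      return offset >= -(int64_t{1} << 25) && offset < (int64_t{1} << 25);
    }

    static_assert(FitsMovzImm16(0xFFFF), "largest encodable deopt id");
    static_assert(IsNearCallOffsetModel((128 << 20) - 4, 0),
                  "just under the +128MB edge of bl's range");
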
@@ -2241,12 +2353,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- Call(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- Jump(code);
+ JumpCodeObject(code);
}
}
@@ -2527,9 +2638,14 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
Pop(fp, lr);
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+}
+
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- CompareAndBranch(in, Operand(kClearedWeakHeapObject), eq, target_if_cleared);
+ CompareAndBranch(in.W(), Operand(kClearedWeakHeapObjectLower32), eq,
+ target_if_cleared);
and_(out, in, Operand(~kWeakHeapObjectMask));
}
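
The cleared-weak comparison now looks only at the lower 32 bits of the word (hence in.W()). A hypothetical sketch of the bit scheme this relies on, assuming (not shown in this diff) a strong tag of 0b01, a weak tag of 0b11, and a weak bit at bit 1:

    #include <cstdint>

    constexpr uint64_t kWeakBitModel = uint64_t{1} << 1;

    constexpr uint64_t StripWeakBit(uint64_t in) {
      return in & ~kWeakBitModel;
    }
    constexpr bool IsClearedModel(uint64_t in, uint32_t cleared_lower32) {
      return static_cast<uint32_t>(in) == cleared_lower32;  // W-reg compare
    }

    static_assert(StripWeakBit(0x1003) == 0x1001,
                  "weak reference -> strong tagged pointer");
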
@@ -2658,10 +2774,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame() || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK_GE(num_unsaved, 0);
@@ -2803,25 +2915,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
// large performance regression is observed, we should use these values to
// avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
Push(object, address);
@@ -2829,7 +2959,11 @@ void TurboAssembler::CallRecordWriteStub(
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
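
The CallRecordWriteStub refactoring introduces a three-overload chain: the public overload targets the RecordWrite builtin, the wasm overload targets an off-heap address, and the private overload dispatches on which of the two was provided (exactly one, per the DCHECK). The dispatch, as a standalone model:

    #include <cassert>
    #include <cstdint>

    using AddressModel = uintptr_t;
    constexpr AddressModel kNullAddressModel = 0;

    enum class RelocModel { kCodeTarget, kWasmStubCall };

    inline RelocModel Dispatch(bool code_target_is_null,
                               AddressModel wasm_target) {
      // Mirrors DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress).
      assert(code_target_is_null == (wasm_target != kNullAddressModel));
      return code_target_is_null ? RelocModel::kWasmStubCall
                                 : RelocModel::kCodeTarget;
    }
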