author    Myles Borins <mylesborins@google.com>  2017-08-01 11:36:44 -0500
committer Myles Borins <mylesborins@google.com>  2017-08-01 15:23:15 -0500
commit    0a66b223e149a841669bfad5598e4254589730cb (patch)
tree      5ec050f7f78aafbf5b1e0e50d639fb843141e162 /deps/v8/src/arm64
parent    1782b3836ba58ef0da6b687f2bb970c0bd8199ad (diff)
deps: update V8 to 6.0.286.52
PR-URL: https://github.com/nodejs/node/pull/14004
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h          |  14
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc             |   9
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h              |   3
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc            |  71
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc           |  25
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc |  18
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc       | 107
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h        |  18
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc             |  69
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h              |  11
10 files changed, 195 insertions(+), 150 deletions(-)
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index f6bb6a8893..e865b634b5 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -832,20 +832,20 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index ac6931dec7..ec12e77274 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -2905,7 +2905,14 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures overflow for very large buffers, so
+ // kMaximalBufferSize must be kept small enough to avoid that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
byte* buffer = reinterpret_cast<byte*>(buffer_);
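
The replacement check caps the buffer size explicitly instead of only guarding against integer overflow. A minimal standalone sketch of the grow-then-cap policy (hypothetical names; the doubling branch for small buffers and the abort handling are assumptions, since only the +1 MB branch and the cap are visible in this hunk):

#include <cstdio>
#include <cstdlib>

constexpr int kMB = 1024 * 1024;
constexpr int kMaxBufferSize = 512 * kMB;  // mirrors Assembler::kMaximalBufferSize

int GrowBufferSize(int current_size) {
  // Assumed policy: double small buffers, grow large ones by 1 MB at a time.
  int new_size = (current_size < 1 * kMB) ? 2 * current_size
                                          : current_size + 1 * kMB;
  if (new_size > kMaxBufferSize) {
    // Stands in for V8::FatalProcessOutOfMemory("Assembler::GrowBuffer").
    std::fprintf(stderr, "Assembler::GrowBuffer: out of memory\n");
    std::abort();
  }
  return new_size;
}
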
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index ea1d94f628..e4ca410abd 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -2134,6 +2134,9 @@ class Assembler : public AssemblerBase {
int next_veneer_pool_check_;
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// If a veneer is emitted for a branch instruction, that instruction must be
// removed from the associated label's link chain so that the assembler does
// not later attempt (likely unsuccessfully) to patch it to branch directly to
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ec00581566..c3c3367b10 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -1267,73 +1267,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- __ EnterExitFrame(false, x10, 1);
- DCHECK(csp.Is(__ StackPointer()));
-
- // We have 9 arguments to pass to the regexp code, therefore we have to pass
- // one on the stack and the rest as registers.
-
- // Note that the placement of the argument on the stack isn't standard
- // AAPCS64:
- // csp[0]: Space for the return address placed by DirectCEntryStub.
- // csp[8]: Argument 9, the current isolate address.
-
- __ Mov(x10, ExternalReference::isolate_address(isolate()));
- __ Poke(x10, kPointerSize);
-
- // Argument 1 (x0): Subject string.
- CHECK(x0.is(RegExpExecDescriptor::StringRegister()));
-
- // Argument 2 (x1): Previous index, already there.
- CHECK(x1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 3 (x2): Input start.
- // Argument 4 (x3): Input end.
- CHECK(x2.is(RegExpExecDescriptor::StringStartRegister()));
- CHECK(x3.is(RegExpExecDescriptor::StringEndRegister()));
-
- // Argument 5 (x4): static offsets vector buffer.
- __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
-
- // Argument 6 (x5): Set the number of capture registers to zero to force
- // global regexps to behave as non-global. This stub is not used for global
- // regexps.
- __ Mov(x5, 0);
-
- // Argument 7 (x6): Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ Mov(x10, address_of_regexp_stack_memory_address);
- __ Ldr(x10, MemOperand(x10));
- __ Mov(x11, address_of_regexp_stack_memory_size);
- __ Ldr(x11, MemOperand(x11));
- __ Add(x6, x10, x11);
-
- // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
- __ Mov(x7, 1);
-
- // Locate the code entry and call it.
- Register code_object = RegExpExecDescriptor::CodeRegister();
- __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_object);
-
- __ LeaveExitFrame(false, x10, true);
-
- // Return the smi-tagged result.
- __ SmiTag(x0);
- __ Ret();
-#endif
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
@@ -3098,9 +3031,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
- if (!call_data_undefined()) {
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 901259f2b4..a178e1d95e 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -31,16 +31,35 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
- // entry sequence unusable (see other architectures).
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+
+ {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
+ patcher.brk(0);
+ }
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate),
+ code_start_address + osr_offset, 1);
+ patcher.brk(0);
+ }
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index c73d371e8f..887adddf29 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -57,11 +57,6 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
-const Register RegExpExecDescriptor::StringRegister() { return x0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return x1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return x2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return x3; }
-const Register RegExpExecDescriptor::CodeRegister() { return x8; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@@ -182,8 +177,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
+ // x0: number of arguments
+ // x2: start index (to support rest parameters)
+ Register registers[] = {x1, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
// x2: start index (to support rest parameters)
- Register registers[] = {x1, x2};
+ Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5edcd7b044..2282c941ba 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -129,7 +129,12 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
+
+ // If the left-hand input is the stack pointer, we can't pre-shift the
+ // immediate, as the encoding won't allow the subsequent post shift.
+ PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
+
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
@@ -437,17 +442,23 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
return false;
}
-
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
- int64_t imm) {
+ int64_t imm,
+ PreShiftImmMode mode) {
int reg_size = dst.SizeInBits();
-
// Encode the immediate in a single move instruction, if possible.
if (TryOneInstrMoveImmediate(dst, imm)) {
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
int shift_low = CountTrailingZeros(imm, reg_size);
+ if (mode == kLimitShiftForSP) {
+ // When applied to the stack pointer, the subsequent arithmetic operation
+ // can use the extend form to shift left by a maximum of four bits. Right
+ // shifts are not allowed, so we filter them out later before the new
+ // immediate is tested.
+ shift_low = std::min(shift_low, 4);
+ }
int64_t imm_low = imm >> shift_low;
// Pre-shift the immediate to the most-significant bits of the register. We
@@ -456,13 +467,13 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
// If this new immediate is encodable, the set bits will be eliminated by
// the post shift on the following instruction.
int shift_high = CountLeadingZeros(imm, reg_size);
- int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
- if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
// The new immediate has been moved into the destination's low bits:
// return a new leftward-shifting operand.
return Operand(dst, LSL, shift_low);
- } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
// The new immediate has been moved into the destination's high bits:
// return a new rightward-shifting operand.
return Operand(dst, LSR, shift_high);
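
The switch to INT64_C(1) matters because shift_high can exceed 31 for 64-bit immediates, and left-shifting a plain int literal that far is undefined behaviour; the low-bit mask has to be built from a 64-bit constant. A standalone illustration (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  int shift_high = 40;  // plausible leading-zero count for a 64-bit immediate
  // Building the mask from a 64-bit literal keeps the shift in range.
  int64_t mask = (INT64_C(1) << shift_high) - 1;
  assert(mask == INT64_C(0xFFFFFFFFFF));  // 40 set bits
  return 0;
}
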
@@ -498,8 +509,21 @@ void MacroAssembler::AddSubMacro(const Register& rd,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
if (operand.IsImmediate()) {
+ PreShiftImmMode mode = kAnyShift;
+
+ // If the destination or source register is the stack pointer, we can
+ // only pre-shift the immediate right by values supported in the add/sub
+ // extend encoding.
+ if (rd.Is(csp)) {
+ // If the destination is SP and flags will be set, we can't pre-shift
+ // the immediate at all.
+ mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+ } else if (rn.Is(csp)) {
+ mode = kLimitShiftForSP;
+ }
+
Operand imm_operand =
- MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
AddSub(rd, rn, imm_operand, S, op);
} else {
Mov(temp, operand);
@@ -1791,14 +1815,13 @@ void MacroAssembler::CallCFunction(ExternalReference function,
CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
+static const int kRegisterPassedArguments = 8;
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
+ DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
- // We can pass 8 integer arguments in registers. If we need to pass more than
- // that, we'll need to implement support for passing them on the stack.
- DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
@@ -1811,6 +1834,10 @@ void MacroAssembler::CallCFunction(Register function,
DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
+ // We rely on the frame alignment being 16 bytes, which means we never need
+ // to align the CSP by an unknown number of bytes and we always know the delta
+ // between the stack pointer and the frame pointer.
+ DCHECK(ActivationFrameAlignment() == 16);
// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
@@ -1819,16 +1846,18 @@ void MacroAssembler::CallCFunction(Register function,
AssertStackConsistency();
int sp_alignment = ActivationFrameAlignment();
- // The ABI mandates at least 16-byte alignment.
- DCHECK(sp_alignment >= 16);
- DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
-
// The current stack pointer is a callee saved register, and is preserved
// across the call.
DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
- // Align and synchronize the system stack pointer with jssp.
- Bic(csp, old_stack_pointer, sp_alignment - 1);
+ // If more than eight arguments are passed to the function, we expect the
+ // ninth argument onwards to have been placed on the csp-based stack
+ // already. We assume csp already points to the last stack-passed argument
+ // in that case.
+ // Otherwise, align and synchronize the system stack pointer with jssp.
+ if (num_of_reg_args <= kRegisterPassedArguments) {
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
+ }
SetStackPointer(csp);
}
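
Bic(csp, old_stack_pointer, sp_alignment - 1) clears the low four bits, i.e. it rounds the stack pointer down to the next 16-byte boundary. The same arithmetic in plain C++ (illustrative values only):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t sp_alignment = 16;
  uint64_t jssp = 0x7ffdeadbef4;              // arbitrary unaligned pointer value
  uint64_t csp = jssp & ~(sp_alignment - 1);  // what Bic(csp, jssp, 15) computes
  assert(csp % 16 == 0);
  assert(jssp - csp < 16);                    // rounded down by at most 15 bytes
  return 0;
}
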
@@ -1836,19 +1865,39 @@ void MacroAssembler::CallCFunction(Register function,
// so the return address in the link register stays correct.
Call(function);
- if (!csp.Is(old_stack_pointer)) {
+ if (csp.Is(old_stack_pointer)) {
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // Drop the arguments that were passed on the stack.
+ int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+ Drop(claim_slots);
+ }
+ } else {
+ DCHECK(jssp.Is(old_stack_pointer));
if (emit_debug_code()) {
- // Because the stack pointer must be aligned on a 16-byte boundary, the
- // aligned csp can be up to 12 bytes below the jssp. This is the case
- // where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- DCHECK(ActivationFrameAlignment() == 16);
- Sub(temp, csp, old_stack_pointer);
- // We want temp <= 0 && temp >= -12.
- Cmp(temp, 0);
- Ccmp(temp, -12, NFlag, le);
- Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // We don't need to drop stack arguments, as the stack pointer will be
+ // jssp when returning from this function. However, in debug builds, we
+ // can check that jssp is as expected.
+ int claim_slots =
+ RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+
+ // Check jssp matches the previous value on the stack.
+ Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
+ Cmp(jssp, temp);
+ Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
+ } else {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
}
SetStackPointer(old_stack_pointer);
}
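
The claim_slots computation rounds the number of stack-passed arguments up to an even count, because csp must stay 16-byte aligned and each slot is 8 bytes. A small worked check, where RoundUp is a hypothetical stand-in for V8's helper:

#include <cassert>

constexpr int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

int main() {
  const int kRegisterPassedArguments = 8;
  int num_of_reg_args = 9;  // one argument spills to the stack
  int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
  assert(claim_slots == 2);  // dropped in pairs: 2 slots * 8 bytes = 16 bytes
  return 0;
}
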
@@ -2547,6 +2596,8 @@ void MacroAssembler::TruncateDoubleToI(Register result,
}
Bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ Uxtw(result.W(), result.W());
}
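
The added Uxtw re-establishes the invariant that the upper 32 bits of the result register are zero, whichever path produced the truncated value. The equivalent zero-extension in portable C++ (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t result = 0xdeadbeef8a2e0370ULL;  // upper 32 bits possibly stale
  result = static_cast<uint32_t>(result);   // what Uxtw does: zero-extend W into X
  assert((result >> 32) == 0);              // invariant restored
  return 0;
}
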
@@ -3733,7 +3784,7 @@ void MacroAssembler::RecordWriteField(
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
Label ok;
- Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ Tst(scratch, kPointerSize - 1);
B(eq, &ok);
Abort(kUnalignedCellInWriteBarrier);
Bind(&ok);
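
The two masks are identical on arm64, where kPointerSize is 8 and kPointerSizeLog2 is 3; the new form simply reads as an alignment mask directly. A compile-time check under those assumed constants:

constexpr int kPointerSizeLog2 = 3;  // assumed arm64 value
constexpr int kPointerSize = 8;      // assumed arm64 value
static_assert(((1 << kPointerSizeLog2) - 1) == (kPointerSize - 1),
              "both expressions yield the alignment mask 0x7");
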
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index e60fbe33fe..6c77dd5b01 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -162,6 +162,21 @@ enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+// The macro assembler supports moving automatically pre-shifted immediates for
+// arithmetic and logical instructions, and then applying a post shift in the
+// instruction to undo the modification, in order to reduce the code emitted for
+// an operation. For example:
+//
+// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
+//
+// This optimisation can be only partially applied when the stack pointer is an
+// operand or destination, so this enumeration is used to control the shift.
+enum PreShiftImmMode {
+ kNoShift, // Don't pre-shift.
+ kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
+ kAnyShift // Allow any pre-shift.
+};
+
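
The example in the comment above can be verified directly: 0xfbef fits in a single movz, and shifting it left by one restores 0x1f7de. A standalone check of that decomposition, with __builtin_ctzll standing in for CountTrailingZeros (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t imm = 0x1f7de;
  // Pre-shift the immediate down to its least-significant bits...
  int shift_low = __builtin_ctzll(imm);   // one trailing zero
  int64_t imm_low = imm >> shift_low;     // 0xfbef, encodable by a single movz
  // ...and let the post shift in the arithmetic instruction undo it.
  assert(shift_low == 1);
  assert(imm_low == 0xfbef);
  assert((imm_low << shift_low) == imm);  // add x0, x0, x16, lsl #1
  return 0;
}
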
class MacroAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
@@ -276,7 +291,8 @@ class MacroAssembler : public Assembler {
// dst is not necessarily equal to imm; it may have had a shifting operation
// applied to it that will be subsequently undone by the shift applied in the
// Operand.
- Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
+ PreShiftImmMode mode);
// Conditional macros.
inline void Ccmp(const Register& rn,
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index b536fd5e9c..fb0e614982 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -208,7 +208,6 @@ int64_t Simulator::CallRegExp(byte* entry,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate) {
CallArgument args[] = {
CallArgument(input),
@@ -219,7 +218,6 @@ int64_t Simulator::CallRegExp(byte* entry,
CallArgument(output_size),
CallArgument(stack_base),
CallArgument(direct_call),
- CallArgument(return_address),
CallArgument(isolate),
CallArgument::End()
};
@@ -540,14 +538,11 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bit values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
- int64_t arg1,
- int64_t arg2,
- int64_t arg3,
- int64_t arg4,
- int64_t arg5,
- int64_t arg6,
- int64_t arg7);
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
@@ -589,6 +584,19 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
FATAL("ALIGNMENT EXCEPTION");
}
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
+
+ const int64_t arg0 = xreg(0);
+ const int64_t arg1 = xreg(1);
+ const int64_t arg2 = xreg(2);
+ const int64_t arg3 = xreg(3);
+ const int64_t arg4 = xreg(4);
+ const int64_t arg5 = xreg(5);
+ const int64_t arg6 = xreg(6);
+ const int64_t arg7 = xreg(7);
+ const int64_t arg8 = stack_pointer[0];
+ STATIC_ASSERT(kMaxCParameters == 9);
+
switch (redirection->type()) {
default:
TraceSim("Type: Unknown.\n");
@@ -606,15 +614,20 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// We don't know how many arguments are being passed, but we can
// pass 8 without touching the stack. They will be ignored by the
// host function if they aren't used.
- TraceSim("Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64,
- xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
- ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim(
+ "Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64,
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
static_cast<void*>(result.y));
#ifdef DEBUG
@@ -636,16 +649,18 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// host function if they aren't used.
TraceSim(
"Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
"0x%016" PRIx64 ", 0x%016" PRIx64,
- xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
- xreg(7));
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
// Return location passed in x8.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
- ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
- xreg(5), xreg(6), xreg(7));
+ ObjectTriple result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
TraceSim("Returned: {%p, %p, %p}\n", static_cast<void*>(result.x),
static_cast<void*>(result.y), static_cast<void*>(result.z));
#ifdef DEBUG
@@ -786,6 +801,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_redirect_call();
}
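
Taking the isolate's simulator_redirection_mutex here serialises Redirection::Get, since several threads may now redirect external references concurrently. A generic sketch of the guarded-table pattern, with std::mutex standing in for base::Mutex and a hypothetical map in place of the real Redirection list:

#include <mutex>
#include <unordered_map>

class RedirectionTable {
 public:
  void* GetOrCreate(void* external_function) {
    std::lock_guard<std::mutex> lock(mutex_);  // like base::LockGuard<base::Mutex>
    auto it = table_.find(external_function);
    if (it != table_.end()) return it->second;
    // The real code allocates a Redirection trampoline here; recording the
    // function itself is just a placeholder for this sketch.
    table_[external_function] = external_function;
    return external_function;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<void*, void*> table_;
};
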
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 3016e616e4..48fc1c7bc6 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -36,17 +36,14 @@ typedef int (*arm64_regexp_matcher)(String* input,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm64_regexp_matcher.
-// The ninth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
+ p8))
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
@@ -201,7 +198,6 @@ class Simulator : public DecoderVisitor {
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
@@ -277,7 +273,7 @@ class Simulator : public DecoderVisitor {
void ResetState();
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -973,8 +969,7 @@ class Simulator : public DecoderVisitor {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->CallRegExp( \
- entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
-
+ entry, p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of