author    Michaël Zasso <targos@protonmail.com>  2016-05-27 16:37:42 +0200
committer Michaël Zasso <targos@protonmail.com>  2016-06-29 09:04:28 +0200
commit    2cc29517966de7257a2f1b34c58c77225a21e05d (patch)
tree      210bd177df2f06eec16e1e22edafdbcbffe66f8a /deps/v8/src/arm64
parent    bbf3838c70aaec1dd296fa75ae334fd1c7866df3 (diff)
deps: update V8 to 5.1.281.69
Pick up the latest branch-head for V8 5.1. This branch brings in improved
language support and performance improvements. For full details:
http://v8project.blogspot.com/2016/04/v8-release-51.html

* Picks up the latest branch head for 5.1 [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/dc81244
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/c8c8665

PR-URL: https://github.com/nodejs/node/pull/7016
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h             16
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc                10
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h                  8
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc                142
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc              240
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc                       2
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc              16
-rw-r--r--  deps/v8/src/arm64/frames-arm64.h                    15
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc    80
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc         229
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h           16
11 files changed, 376 insertions(+), 398 deletions(-)
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index aeca563c37..6191216281 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -41,6 +41,18 @@ void RelocInfo::set_target_address(Address target,
}
}
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, size_t old_size, size_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ DCHECK(old_base <= wasm_memory_reference() &&
+ wasm_memory_reference() < old_base + old_size);
+ Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_base <= updated_reference &&
+ updated_reference < new_base + new_size);
+ Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+ icache_flush_mode);
+}
inline int CPURegister::code() const {
DCHECK(IsValid());
@@ -693,6 +705,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
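[editor's note] The new update_wasm_memory_reference hook above rebases a recorded wasm memory address from the old buffer to the new one. A minimal sketch of that arithmetic, using plain byte pointers in place of V8's Address type and asserts in place of the DCHECKs (names here are illustrative, not from the patch):

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Rebase a reference that pointed into [old_base, old_base + old_size) so it
// points at the same offset inside [new_base, new_base + new_size).
Address RebaseWasmMemoryReference(Address reference, Address old_base,
                                  size_t old_size, Address new_base,
                                  size_t new_size) {
  assert(old_base <= reference && reference < old_base + old_size);
  Address updated = new_base + (reference - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}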
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index ea7a732f8a..2471d5eebd 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -56,7 +56,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- supported_ |= 1u << COHERENT_CACHE;
+ // TODO(jkummerow): This is turned off as an experiment to see if it
+ // affects crash rates. Keep an eye on crash reports and either remove
+ // coherent cache support permanently, or re-enable it!
+ // supported_ |= 1u << COHERENT_CACHE;
}
}
@@ -437,7 +440,8 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::NONE32);
return RelocInfo::IsNone(mode) ||
- (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+ (!assm_->serializer_enabled() &&
+ (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
}
@@ -2871,7 +2875,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 47786eb710..546025475e 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -922,7 +922,9 @@ class Assembler : public AssemblerBase {
}
// Debugging ----------------------------------------------------------------
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ AssemblerPositionsRecorder* positions_recorder() {
+ return &positions_recorder_;
+ }
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
@@ -2135,8 +2137,8 @@ class Assembler : public AssemblerBase {
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private:
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
+ AssemblerPositionsRecorder positions_recorder_;
+ friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 11f66a4ef4..44bfc1762d 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -518,6 +518,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- x2 : allocation site or undefined
// -- x3 : new target
// -- lr : return address
+ // -- cp : context pointer
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -537,6 +538,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ __ Push(cp);
__ SmiTag(argc);
__ Push(allocation_site, argc);
@@ -623,7 +625,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: result
// jssp[0]: receiver
// jssp[1]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@@ -763,9 +765,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Clear the context before we push it when entering the internal frame.
- __ Mov(cp, 0);
-
{
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1394,23 +1393,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as indicator that recompilation might be done.
- Label ok;
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&ok);
- __ Ret();
-}
-
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@@ -1456,6 +1438,29 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : first argument (left-hand side)
+ // -- jssp[8] : receiver (right-hand side)
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Ldr(InstanceOfDescriptor::LeftRegister(),
+ MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
+ __ Ldr(InstanceOfDescriptor::RightRegister(),
+ MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
+ InstanceOfStub stub(masm->isolate(), true);
+ __ CallStub(&stub);
+ }
+
+ // Pop the argument and the receiver.
+ __ Drop(2);
+ __ Ret();
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1972,19 +1977,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
- // Prepare for tail call only if the debugger is not active.
+ // Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(masm->isolate());
- __ Mov(scratch1, Operand(debug_is_active));
+ ExternalReference is_tail_call_elimination_enabled =
+ ExternalReference::is_tail_call_elimination_enabled_address(
+ masm->isolate());
+ __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ Ldrb(scratch1, MemOperand(scratch1));
__ Cmp(scratch1, Operand(0));
- __ B(ne, &done);
+ __ B(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(scratch3,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ B(ne, &no_interpreter_frame);
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1992,18 +1999,19 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
- // Drop arguments adaptor frame and load arguments count.
+ // Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ Ldr(scratch1,
+ __ Ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(scratch1);
+ __ SmiUntag(caller_args_count_reg);
__ B(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
@@ -2011,54 +2019,14 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(scratch1,
+ __ Ldrsw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
__ bind(&formal_parameter_count_loaded);
- // Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
- Register dst_reg = scratch2;
- __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
- __ add(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
- Register src_reg = scratch1;
- __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
- // Count receiver argument as well (not included in args_reg).
- __ add(src_reg, src_reg, Operand(kPointerSize));
-
- if (FLAG_debug_code) {
- __ Cmp(src_reg, dst_reg);
- __ Check(lo, kStackAccessBelowStackPointer);
- }
-
- // Restore caller's frame pointer and return address now as they will be
- // overwritten by the copying loop.
- __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
-
- // Both src_reg and dst_reg are pointing to the word after the one to copy,
- // so they must be pre-decremented in the loop.
- Register tmp_reg = scratch3;
- Label loop, entry;
- __ B(&entry);
- __ bind(&loop);
- __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
- __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ Cmp(jssp, src_reg);
- __ B(ne, &loop);
-
- // Leave current frame.
- __ Mov(jssp, dst_reg);
- __ SetStackPointer(jssp);
- __ AssertStackConsistency();
-
+ ParameterCount callee_args_count(args_reg);
+ __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+ scratch3);
__ bind(&done);
}
} // namespace
@@ -2610,30 +2578,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
- // If the function is strong we need to throw an error.
- Label no_strong_error;
- __ Ldr(scratch1,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAllClear(scratch2.W(),
- (1 << SharedFunctionInfo::kStrongModeFunction),
- &no_strong_error);
-
- // What we really care about is the required number of arguments.
- DCHECK_EQ(kPointerSize, kInt64Size);
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
- __ Cmp(argc_actual, Operand(scratch2, LSR, 1));
- __ B(ge, &no_strong_error);
-
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
- }
-
- __ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ad566e68fc..ee4053515a 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -81,6 +82,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+ descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -425,7 +430,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(
MacroAssembler* masm, Register left, Register right, Register left_map,
@@ -435,7 +442,7 @@ static void EmitCheckForInternalizedStringsOrObjects(
Register result = x0;
DCHECK(left.is(x0) || right.is(x0));
- Label object_test, return_unequal, undetectable;
+ Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// TODO(all): reexamine this branch sequence for optimisation wrt branch
// prediction.
@@ -463,12 +470,22 @@ static void EmitCheckForInternalizedStringsOrObjects(
__ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, runtime_call);
- __ bind(&return_unequal);
+ __ Bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in x0.
__ Ret();
- __ bind(&undetectable);
+ __ Bind(&undetectable);
__ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+
+ // If both sides are JSReceivers, then the result is false according to
+ // the HTML specification, which says that only comparisons with null or
+ // undefined are affected by special casing for document.all.
+ __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
+ __ B(eq, &return_equal);
+ __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
+ __ B(ne, &return_unequal);
+
+ __ Bind(&return_equal);
__ Mov(result, EQUAL);
__ Ret();
}
@@ -1324,7 +1341,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
- __ Push(x13, xzr, x12, x10);
+ __ Push(x13, x12, xzr, x10);
// Set up fp.
__ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
@@ -1544,8 +1561,11 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
&slow_case);
- // Ensure that {function} has an instance prototype.
+ // Go to the runtime if the function is not a constructor.
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ Tbz(scratch, Map::kIsConstructor, &slow_case);
+
+ // Ensure that {function} has an instance prototype.
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
@@ -1612,27 +1632,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf);
-}
-
-
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
-
- // Check that the key is an array index, that is Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
- __ Bind(&slow);
- PropertyAccessCompiler::TailCallBuiltin(
- masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+ __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+ : Runtime::kInstanceOf);
}
@@ -2856,10 +2857,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ Bind(&runtime);
- __ Push(lhs, rhs);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(lhs, rhs);
+ __ CallRuntime(Runtime::kStringEqual);
+ }
+ __ LoadRoot(x1, Heap::kTrueValueRootIndex);
+ __ Sub(x0, x0, x1);
+ __ Ret();
} else {
+ __ Push(lhs, rhs);
__ TailCallRuntime(Runtime::kStringCompare);
}
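[editor's note] In the equality branch above, Runtime::kStringEqual returns the true or false oddball, and subtracting the 'true' root converts that into the zero/non-zero value the compare IC checks against. A hedged sketch of that conversion, with plain integers standing in for tagged pointers:

#include <cstdint>

// 0 means "equal"; any non-zero value means "not equal".
intptr_t BooleanToComparisonResult(intptr_t runtime_result,
                                   intptr_t true_root) {
  return runtime_result - true_root;
}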
@@ -3227,27 +3235,28 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_smi);
Label not_heap_number;
- __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
- // x0: object
- // x1: instance type
- __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+ // x0: receiver
+ // x1: receiver instance type
__ B(ne, &not_heap_number);
__ Ret();
__ Bind(&not_heap_number);
- Label not_string, slow_string;
- __ Cmp(x1, FIRST_NONSTRING_TYPE);
+ NonNumberToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+ // The NonNumberToNumber stub takes one argument in x0.
+ __ AssertNotNumber(x0);
+
+ Label not_string;
+ __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+ // x0: receiver
+ // x1: receiver instance type
__ B(hs, &not_string);
- // Check if string has a cached array index.
- __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
- __ B(ne, &slow_string);
- __ IndexFromHash(x2, x0);
- __ Ret();
- __ Bind(&slow_string);
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
+ StringToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
__ Bind(&not_string);
Label not_oddball;
@@ -3261,22 +3270,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+ // The StringToNumber stub takes one argument in x0.
+ __ AssertString(x0);
-void ToLengthStub::Generate(MacroAssembler* masm) {
- // The ToLength stub takes one argument in x0.
- Label not_smi;
- __ JumpIfNotSmi(x0, &not_smi);
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(x0, x0);
- __ Csel(x0, x0, Operand(0), ge);
+ // Check if string has a cached array index.
+ Label runtime;
+ __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+ __ B(ne, &runtime);
+ __ IndexFromHash(x2, x0);
__ Ret();
- __ Bind(&not_smi);
+ __ Bind(&runtime);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength);
+ __ TailCallRuntime(Runtime::kStringToNumber);
}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in x0.
Label is_number;
@@ -3449,43 +3459,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void StringCompareStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : left
- // -- x0 : right
- // -- lr : return address
- // -----------------------------------
- __ AssertString(x1);
- __ AssertString(x0);
-
- Label not_same;
- __ Cmp(x0, x1);
- __ B(ne, &not_same);
- __ Mov(x0, Smi::FromInt(EQUAL));
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
- x4);
- __ Ret();
-
- __ Bind(&not_same);
-
- // Check that both objects are sequential one-byte strings.
- Label runtime;
- __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
-
- // Compare flat one-byte strings natively.
- __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
- x4);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
- x15);
-
- // Call the runtime.
- // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ Bind(&runtime);
- __ Push(x1, x0);
- __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left
@@ -3682,7 +3655,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
@@ -4972,7 +4945,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@@ -4980,8 +4953,8 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Check if we have rest parameters (only possible if we have an
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
+ __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &no_rest_parameters);
@@ -5137,8 +5110,9 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Label runtime;
Label adaptor_frame, try_allocate;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
+ __ Ldr(
+ caller_ctx,
+ MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &adaptor_frame);
@@ -5401,7 +5375,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
- __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@@ -5409,7 +5383,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
+ __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
@@ -5804,16 +5778,12 @@ static void CallApiFunctionAndReturn(
__ B(&leave_exit_frame);
}
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
- const ParameterCount& argc,
- bool return_first_arg,
- bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
- // -- x3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5839,17 +5809,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || x3.is(argc.reg()));
-
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
- if (!is_lazy) {
+ if (!is_lazy()) {
// Load context from callee
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
- if (!call_data_undefined) {
+ if (!call_data_undefined()) {
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
Register isolate_reg = x5;
@@ -5878,26 +5846,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
- if (argc.is_immediate()) {
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args,
- Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call = 0
- __ Mov(x10, argc.immediate());
- __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
- } else {
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
- __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call
- __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
- __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
- __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
- }
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc());
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5907,7 +5862,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (return_first_arg) {
+ if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5917,10 +5872,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand is_construct_call_operand =
MemOperand(masm->StackPointer(), 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
- if (argc.is_immediate()) {
- stack_space = argc.immediate() + FCA::kArgsLength + 1;
- stack_space_operand = NULL;
- }
+ stack_space = argc() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5929,23 +5882,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- bool call_data_undefined = this->call_data_undefined();
- CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
- call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
- bool is_store = this->is_store();
- int argc = this->argc();
- bool call_data_undefined = this->call_data_undefined();
- bool is_lazy = this->is_lazy();
- CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined, is_lazy);
-}
-
-
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 37bb4a22ba..712dbbd650 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -19,7 +19,7 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
- __asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT
+ __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
: [ctr] "=r"(cache_type_register_));
#endif
}
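[editor's note] The one-character change above adds the %x operand modifier so the compiler prints the 64-bit register name (x<n>) for the mrs operand even when the output variable is 32-bit; without it, an assembler may reject "mrs w<n>, ctr_el0". A standalone sketch, assuming GCC/Clang inline-asm syntax and an AArch64 target:

#include <cstdint>

static inline uint32_t ReadCacheTypeRegister() {
  uint32_t ctr = 0;
  // %x forces the X-register spelling; mrs cannot take a W register.
  __asm__ __volatile__("mrs %x[ctr], ctr_el0" : [ctr] "=r"(ctr));
  return ctr;
}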
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 3aa1e4dfa1..fe2a269935 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -65,12 +65,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
- // There is no dynamic alignment padding on ARM64 in the input frame.
- return false;
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -132,12 +126,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
- __ Add(fp_to_sp, masm()->StackPointer(),
+ __ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
+ __ Mov(x0, 0);
+ Label context_check;
+ __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(x1, &context_check);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@@ -212,6 +211,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+ __ Ldr(__ StackPointer(),
+ MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
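[editor's note] The context check added in the second hunk of deoptimizer-arm64.cc above distinguishes a real context from a Smi frame-type marker in the kContextOrFrameTypeOffset slot: for typed/stub frames the marker is a Smi, and x0 stays 0 instead of holding a JSFunction. A rough sketch of that decision, assuming V8's kSmiTag == 0 convention:

#include <cstdint>

// Returns the function argument passed to the Deoptimizer: 0 for typed
// frames (the slot holds a Smi frame-type marker), the frame's JSFunction
// slot value otherwise.
uintptr_t DeoptFunctionArgument(uintptr_t context_or_marker_slot,
                                uintptr_t function_slot) {
  const uintptr_t kSmiTagMask = 1;  // kSmiTag == 0, so low bit 0 => Smi
  if ((context_or_marker_slot & kSmiTagMask) == 0) return 0;
  return function_slot;
}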
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 783514437f..f1e45f5fdc 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -34,16 +34,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCallerSPDisplacement = 2 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
- static const int kSPOffset = -1 * kPointerSize;
- static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
static const int kLastExitFrameField = kCodeOffset;
static const int kConstantPoolOffset = 0; // Not used
@@ -59,7 +54,7 @@ class JavaScriptFrameConstants : public AllStatic {
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
};
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index c6ae37e733..f307aeb6d4 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -114,37 +114,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
-void ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-// static
-const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
-
-
// static
-const Register ToStringDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -294,6 +265,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Descriptor::InitializePlatformSpecific( \
+ CallInterfaceDescriptorData* data) { \
+ data->InitializePlatformSpecific(0, nullptr, nullptr); \
+ }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -336,28 +314,18 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CompareNilDescriptor::InitializePlatformSpecific(
+void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x0: value to compare
+ // stack param count needs (arg count)
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
+void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x0: value
- Register registers[] = {x0};
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -444,25 +412,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static PlatformInterfaceDescriptor default_descriptor =
- PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- Register registers[] = {
- x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- x3, // actual number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers,
- &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 953c3fd7f2..12ddd8145e 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1355,6 +1355,14 @@ void MacroAssembler::AssertStackConsistency() {
}
}
+void MacroAssembler::AssertCspAligned() {
+ if (emit_debug_code() && use_real_aborts()) {
+ // TODO(titzer): use a real assert for alignment check?
+ UseScratchRegisterScope scope(this);
+ Register temp = scope.AcquireX();
+ ldr(temp, MemOperand(csp));
+ }
+}
void MacroAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
@@ -1548,24 +1556,38 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Register scratch1,
Register scratch2,
Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
+ Label map_check;
+ Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
-
- Add(scratch1, receiver,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
- Cmp(scratch1, new_space_start);
- B(lt, no_memento_found);
-
- Mov(scratch2, new_space_allocation_top);
- Ldr(scratch2, MemOperand(scratch2));
- Cmp(scratch1, scratch2);
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ JumpIfNotInNewSpace(receiver, no_memento_found);
+ Add(scratch1, receiver, kMementoEndOffset);
+ // If the object is in new space, we need to check whether it is on the same
+ // page as the current top.
+ Eor(scratch2, scratch1, new_space_allocation_top);
+ Tst(scratch2, ~Page::kPageAlignmentMask);
+ B(eq, &top_check);
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ Eor(scratch2, scratch1, receiver);
+ Tst(scratch2, ~Page::kPageAlignmentMask);
+ B(ne, no_memento_found);
+ // Continue with the actual map check.
+ jmp(&map_check);
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ bind(&top_check);
+ Cmp(scratch1, new_space_allocation_top);
B(gt, no_memento_found);
-
- Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
- Cmp(scratch1,
- Operand(isolate()->factory()->allocation_memento_map()));
+ // Memento map check.
+ bind(&map_check);
+ Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
+ Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
}
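[editor's note] The rewritten memento test above decides whether two addresses lie on the same page by XOR-ing them and masking off the in-page bits. A minimal sketch of that check; kPageSize here is an illustrative stand-in for V8's page size, not a value taken from the patch:

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // illustrative only
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

// True when both addresses fall on the same power-of-two-aligned page.
bool OnSamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;
}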
@@ -1690,6 +1712,18 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
+void MacroAssembler::AssertNotNumber(Register value) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(value, kSmiTagMask);
+ Check(ne, kOperandIsANumber);
+ Label done;
+ JumpIfNotHeapNumber(value, &done);
+ Abort(kOperandIsANumber);
+ Bind(&done);
+ }
+}
+
void MacroAssembler::AssertNumber(Register value) {
if (emit_debug_code()) {
Label done;
@@ -2330,6 +2364,66 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
B(ne, not_unique_name);
}
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg,
+ Register scratch0, Register scratch1) {
+#if DEBUG
+ if (callee_args_count.is_reg()) {
+ DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+ scratch1));
+ } else {
+ DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+ }
+#endif
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+ __ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count_reg;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ if (callee_args_count.is_reg()) {
+ add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ add(src_reg, src_reg, Operand(kPointerSize));
+ } else {
+ add(src_reg, jssp,
+ Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ }
+
+ if (FLAG_debug_code) {
+ __ Cmp(src_reg, dst_reg);
+ __ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ __ B(&entry);
+ __ bind(&loop);
+ __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ Cmp(jssp, src_reg);
+ __ B(ne, &loop);
+
+ // Leave current frame.
+ __ Mov(jssp, dst_reg);
+ __ SetStackPointer(jssp);
+ __ AssertStackConsistency();
+}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
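[editor's note] The copying loop in the new MacroAssembler::PrepareForTailCall above moves the callee's arguments down over the current frame, walking backwards so overlapping source and destination ranges stay safe. A simplified sketch with plain pointers (the real code works on jssp-relative stack slots):

#include <cstdint>

// dst and src point one word past the last word to copy and are
// pre-decremented, mirroring the PreIndex addressing mode above;
// stack_top plays the role of jssp.
void CopyArgumentsDownwards(uintptr_t* dst, uintptr_t* src,
                            uintptr_t* stack_top) {
  while (src != stack_top) {
    *--dst = *--src;
  }
}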
@@ -2651,18 +2745,17 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
Bind(&done);
}
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
UseScratchRegisterScope temps(this);
+ frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(StackFrame::STUB));
- // Compiled stubs don't age, and so they don't need the predictable code
- // ageing sequence.
- __ Push(lr, fp, cp, temp);
- __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Mov(temp, Smi::FromInt(type));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Claim(frame_slots);
+ str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}
-
void MacroAssembler::Prologue(bool code_pre_aging) {
if (code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
@@ -2694,18 +2787,26 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
- Push(lr, fp, cp);
- Mov(type_reg, Smi::FromInt(type));
- Mov(code_reg, Operand(CodeObject()));
- Push(type_reg, code_reg);
- // jssp[4] : lr
- // jssp[3] : fp
- // jssp[2] : cp
- // jssp[1] : type
- // jssp[0] : code object
-
- // Adjust FP to point to saved FP.
- Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ if (type == StackFrame::INTERNAL) {
+ Mov(type_reg, Smi::FromInt(type));
+ Push(lr, fp);
+ Push(type_reg);
+ Mov(code_reg, Operand(CodeObject()));
+ Push(code_reg);
+ Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[1] : type
+ // jssp[0] : [code object]
+ } else {
+ Mov(type_reg, Smi::FromInt(type));
+ Push(lr, fp);
+ Push(type_reg);
+ Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
+ // jssp[2] : lr
+ // jssp[1] : fp
+ // jssp[0] : type
+ }
}
@@ -2746,20 +2847,23 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
- Mov(scratch, Operand(CodeObject()));
Push(lr, fp);
Mov(fp, StackPointer());
- Push(xzr, scratch);
+ Mov(scratch, Smi::FromInt(StackFrame::EXIT));
+ Push(scratch);
+ Push(xzr);
+ Mov(scratch, Operand(CodeObject()));
+ Push(scratch);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // jssp -> fp[-16]: CodeObject()
- STATIC_ASSERT((2 * kPointerSize) ==
- ExitFrameConstants::kCallerSPDisplacement);
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // jssp -> fp[-24]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
- STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
- STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
// Save the frame pointer and context pointer in the top frame.
Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@@ -2769,8 +2873,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
isolate())));
Str(cp, MemOperand(scratch));
- STATIC_ASSERT((-2 * kPointerSize) ==
- ExitFrameConstants::kLastExitFrameField);
+ STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
if (save_doubles) {
ExitFramePreserveFPRegs();
}
@@ -2781,9 +2884,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
Claim(extra_space + 1, kXRegSize);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // fp[-24]: CodeObject()
+ // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
@@ -2793,9 +2897,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // fp[-8]: STUB marker
+ // fp[-16]: Space reserved for SPOffset.
+ // fp[-24]: CodeObject()
+ // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
@@ -3678,8 +3783,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
- // Load current lexical context from the stack frame.
- Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Load current lexical context from the active StandardFrame, which
+ // may require crawling past STUB frames.
+ Label load_context;
+ Label has_context;
+ Mov(scratch2, fp);
+ bind(&load_context);
+ Ldr(scratch1,
+ MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+ JumpIfNotSmi(scratch1, &has_context);
+ Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+ B(&load_context);
+ bind(&has_context);
+
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Cmp(scratch1, 0);
@@ -3916,13 +4032,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
- DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
- (1 << (14 + kPointerSizeLog2)));
+ Tst(scratch1, StoreBuffer::kStoreBufferMask);
if (and_then == kFallThroughAtEnd) {
- Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+ B(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
- Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ B(eq, &store_buffer_overflow);
Ret();
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index ff41c4f27f..4b6b3c0fb1 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -788,6 +788,9 @@ class MacroAssembler : public Assembler {
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
+ // Emits a runtime assert that the CSP is aligned.
+ void AssertCspAligned();
+
// Preserve the callee-saved registers (as defined by AAPCS64).
//
// Higher-numbered registers are pushed before lower-numbered registers, and
@@ -895,6 +898,7 @@ class MacroAssembler : public Assembler {
// This is required for compatibility with architecture independant code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
+ inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
@@ -986,6 +990,7 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a number (heap number or smi).
void AssertNumber(Register value);
+ void AssertNotNumber(Register value);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@@ -1165,6 +1170,15 @@ class MacroAssembler : public Assembler {
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1621,7 +1635,7 @@ class MacroAssembler : public Assembler {
void ExitFrameRestoreFPRegs();
// Generates function and stub prologue code.
- void StubPrologue();
+ void StubPrologue(StackFrame::Type type, int frame_slots);
void Prologue(bool code_pre_aging);
// Enter exit frame. Exit frames are used when calling C code from generated