path: root/deps/v8/src/arm64
author    Michaël Zasso <mic.besace@gmail.com>  2015-10-06 08:42:38 +0200
committer Ali Ijaz Sheikh <ofrobots@google.com>  2015-10-14 11:20:34 -0700
commit    d8011d1683fe0d977de2bea1147f5213d4490c5a (patch)
tree      54967df8dc1732e59eef39e5c5b39fe99ad88977 /deps/v8/src/arm64
parent    d1a2e5357ef0357cec9b516fa9ac78cc38a984aa (diff)
download  android-node-v8-d8011d1683fe0d977de2bea1147f5213d4490c5a.tar.gz
          android-node-v8-d8011d1683fe0d977de2bea1147f5213d4490c5a.tar.bz2
          android-node-v8-d8011d1683fe0d977de2bea1147f5213d4490c5a.zip
deps: upgrade V8 to 4.6.85.23
PR-URL: https://github.com/nodejs/node/pull/3351
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/arm64')
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 63
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 57
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 39
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc | 442
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 287
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h | 16
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/debug-arm64.cc | 305
-rw-r--r--  deps/v8/src/arm64/decoder-arm64-inl.h | 3
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.h | 1
-rw-r--r--  deps/v8/src/arm64/delayed-masm-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 5
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc | 21
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.h | 2
-rw-r--r--  deps/v8/src/arm64/frames-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/frames-arm64.h | 6
-rw-r--r--  deps/v8/src/arm64/full-codegen-arm64.cc | 5578
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc | 6
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 40
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc | 54
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.h | 42
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc | 140
-rw-r--r--  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/lithium-gap-resolver-arm64.h | 2
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64-inl.h | 17
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 62
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 33
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc | 1617
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.h | 295
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 21
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h | 13
-rw-r--r--  deps/v8/src/arm64/utils-arm64.h | 1
35 files changed, 809 insertions(+), 8374 deletions(-)
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index bbd44c5f10..3fbb09147b 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -7,7 +7,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -17,7 +17,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
DCHECK(RelocInfo::IsInternalReference(rmode_));
@@ -611,11 +611,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Address Assembler::return_address_from_call_start(Address pc) {
// The call, generated by MacroAssembler::Call, is one of two possible
// sequences:
@@ -825,18 +820,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// For the above sequences the Relocinfo points to the load literal loading
// the call address.
+ STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
return Assembler::target_address_at(pc_, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -862,7 +857,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
- // See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
+ // See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
@@ -888,11 +883,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -913,11 +905,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -973,32 +962,6 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
}
-LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- DCHECK(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDNP_x : LDNP_w;
- } else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? LDNP_d : LDNP_s;
- }
-}
-
-
-LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- DCHECK(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? STNP_x : STNP_w;
- } else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? STNP_d : STNP_s;
- }
-}
-
-
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index f27d3b97b0..235b5ee2bc 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -26,13 +26,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
@@ -1628,37 +1627,6 @@ void Assembler::LoadStorePair(const CPURegister& rt,
}
-void Assembler::ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- LoadStorePairNonTemporal(rt, rt2, src,
- LoadPairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- LoadStorePairNonTemporal(rt, rt2, dst,
- StorePairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op) {
- DCHECK(!rt.Is(rt2));
- DCHECK(AreSameSizeAndType(rt, rt2));
- DCHECK(addr.IsImmediateOffset());
- LSDataSize size = CalcLSPairDataSize(
- static_cast<LoadStorePairOp>(op & LoadStorePairMask));
- DCHECK(IsImmLSPair(addr.offset(), size));
- int offset = static_cast<int>(addr.offset());
- Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
-}
-
-
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
@@ -2902,21 +2870,18 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ if (((rmode >= RelocInfo::COMMENT) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
- (rmode == RelocInfo::CONST_POOL) ||
- (rmode == RelocInfo::VENEER_POOL) ||
- (rmode == RelocInfo::DEOPT_REASON)) {
+ (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
+ (rmode == RelocInfo::DEOPT_REASON) ||
+ (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsDeoptReason(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsInternalReference(rmode)
- || RelocInfo::IsConstPool(rmode)
- || RelocInfo::IsVeneerPool(rmode));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
+ RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
+ RelocInfo::IsInternalReference(rmode) ||
+ RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsGeneratorContinuation(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 5fab081d4b..a7e5a06640 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -893,9 +893,6 @@ class Assembler : public AssemblerBase {
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -955,25 +952,13 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
- // Number of instructions generated for the return sequence in
- // FullCodeGenerator::EmitReturnSequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstructionSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
static const int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
- // See DebugCodegen::GenerateSlot() and
- // BreakLocation::SetDebugBreakAtSlot().
- static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotInstructions = 5;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
-
// Prevent contant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal
// number of call to EndBlockConstpool.
@@ -1022,11 +1007,11 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the emission of a constant pool.
//
@@ -1507,14 +1492,6 @@ class Assembler : public AssemblerBase {
// Load word pair with sign extension.
void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
- // Load integer or FP register pair, non-temporal.
- void ldnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& src);
-
- // Store integer or FP register pair, non-temporal.
- void stnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& dst);
-
// Load literal to register from a pc relative address.
void ldr_pcrel(const CPURegister& rt, int imm19);
@@ -2022,10 +1999,6 @@ class Assembler : public AssemblerBase {
static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
@@ -2051,10 +2024,6 @@ class Assembler : public AssemblerBase {
const Operand& operand,
FlagsUpdate S,
Instr op);
- void LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 45ac1a063b..19a83646f9 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -302,36 +301,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ Peek(x4, 2 * kXRegSize);
- __ Push(x4);
- __ Push(x1); // Argument for Runtime_NewObject.
- __ Push(original_constructor);
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- __ Mov(x4, x0);
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- __ jmp(count_incremented);
- } else {
- __ Push(x1); // Argument for Runtime_NewObject.
- __ Push(original_constructor);
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Mov(x4, x0);
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -352,44 +323,35 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Preserve the three incoming parameters on the stack.
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(x2, x10);
- __ Push(x2);
- }
-
+ // Preserve the four incoming parameters on the stack.
Register argc = x0;
Register constructor = x1;
+ Register allocation_site = x2;
Register original_constructor = x3;
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
- if (use_new_target) {
- __ Push(argc, constructor, original_constructor);
- } else {
- __ Push(argc, constructor);
- }
- // sp[0]: new.target (if used)
- // sp[0/1]: Constructor function.
- // sp[1/2]: number of arguments (smi-tagged)
-
- Label rt_call, count_incremented, allocated, normal_new;
- __ Cmp(constructor, original_constructor);
- __ B(eq, &normal_new);
- Generate_Runtime_NewObject(masm, create_memento, original_constructor,
- &count_incremented, &allocated);
-
- __ Bind(&normal_new);
+ __ Push(allocation_site, argc, constructor, original_constructor);
+ // sp[0]: new.target
+ // sp[1]: Constructor function.
+ // sp[2]: number of arguments (smi-tagged)
+ // sp[3]: allocation site
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ Mov(x2, Operand(debug_step_in_fp));
__ Ldr(x2, MemOperand(x2));
__ Cbnz(x2, &rt_call);
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ Cmp(constructor, original_constructor);
+ __ B(ne, &rt_call);
+
// Load the initial map and verify that it is in fact a map.
Register init_map = x2;
__ Ldr(init_map,
@@ -430,15 +392,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Now allocate the JSObject on the heap.
+ Label rt_call_reload_new_target;
Register obj_size = x3;
Register new_obj = x4;
__ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
if (create_memento) {
__ Add(x7, obj_size,
Operand(AllocationMemento::kSize / kPointerSize));
- __ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(x7, new_obj, x10, x11, &rt_call_reload_new_target,
+ SIZE_IN_WORDS);
} else {
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
+ SIZE_IN_WORDS);
}
// Allocated the JSObject, now initialize the fields. Map is set to
@@ -460,15 +425,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Obtain number of pre-allocated property fields and in-object
// properties.
- Register prealloc_fields = x10;
+ Register unused_props = x10;
Register inobject_props = x11;
- Register inst_sizes = x11;
- __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
- __ Ubfx(prealloc_fields, inst_sizes,
- Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Ubfx(inobject_props, inst_sizes,
- Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+ Register inst_sizes_or_attrs = x11;
+ Register prealloc_fields = x10;
+ __ Ldr(inst_sizes_or_attrs,
+ FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
+ __ Ubfx(unused_props, inst_sizes_or_attrs,
+ Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
+ __ Ldr(inst_sizes_or_attrs,
+ FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(
+ inobject_props, inst_sizes_or_attrs,
+ Map::kInObjectPropertiesOrConstructorFunctionIndexByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Sub(prealloc_fields, inobject_props, unused_props);
// Calculate number of property fields in the object.
Register prop_fields = x6;
@@ -511,7 +482,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
- __ Peek(x14, 2 * kXRegSize);
+ __ Peek(x14, 3 * kXRegSize);
+ __ AssertUndefinedOrAllocationSite(x14, x10);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
@@ -523,72 +495,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ Add(new_obj, new_obj, kHeapObjectTag);
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- Register element_count = x3;
- __ Ldrb(element_count,
- FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ Add(element_count, element_count, prealloc_fields);
- __ Subs(element_count, element_count, inobject_props);
-
- // Done if no extra properties are to be allocated.
- __ B(eq, &allocated);
- __ Assert(pl, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- Register new_array = x5;
- Register array_size = x6;
- __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
- __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
- SIZE_IN_WORDS));
-
- Register array_map = x10;
- __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
- __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
- __ SmiTag(x0, element_count);
- __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
-
- // Initialize the fields to undefined.
- Register elements = x10;
- __ Add(elements, new_array, FixedArray::kHeaderSize);
- __ FillFields(elements, element_count, filler);
-
- // Store the initialized FixedArray into the properties field of the
- // JSObject.
- __ Add(new_array, new_array, kHeapObjectTag);
- __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated.
__ B(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- __ Bind(&undo_allocation);
- __ UndoAllocationInNewSpace(new_obj, x14);
+ // Reload the original constructor and fall-through.
+ __ Bind(&rt_call_reload_new_target);
+ __ Peek(x3, 0 * kXRegSize);
}
// Allocate the new receiver object using the runtime call.
+ // x1: constructor function
+ // x3: original constructor
__ Bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, constructor,
- &count_incremented, &allocated);
+ Label count_incremented;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 3 * kXRegSize);
+ __ Push(x4, constructor, original_constructor); // arguments 1-3
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ __ Mov(x4, x0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ __ B(&count_incremented);
+ } else {
+ __ Push(constructor, original_constructor); // arguments 1-2
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Mov(x4, x0);
+ }
// Receiver for constructor call allocated.
// x4: JSObject
__ Bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kXRegSize;
- __ Peek(x10, offset);
+ __ Peek(x10, 3 * kXRegSize);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@@ -601,9 +545,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(original_constructor);
- }
+ __ Pop(original_constructor);
__ Pop(constructor);
// Reload the number of arguments from the stack.
@@ -612,11 +554,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
- if (use_new_target) {
- __ Push(original_constructor, x4, x4);
- } else {
- __ Push(x4, x4);
- }
+ __ Push(original_constructor, x4, x4);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -628,8 +566,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
- // jssp[2]: new.target (if used)
- // jssp[2/3]: number of arguments (smi-tagged)
+ // jssp[2]: new.target
+ // jssp[3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@@ -660,17 +598,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
- // jssp[1]: new.target (if used)
- // jssp[1/2]: number of arguments (smi-tagged)
+ // jssp[1]: new.target
+ // jssp[2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -698,10 +634,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
- // jssp[1]: new.target (if used)
- // jssp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kXRegSize;
- __ Peek(x1, offset);
+ // jssp[1]: new.target (original constructor)
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSize);
// Leave construct frame.
}
@@ -714,17 +649,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -739,18 +669,18 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ __ AssertUndefinedOrAllocationSite(x2, x10);
__ Mov(x4, x0);
__ SmiTag(x4);
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Push(x4, x3, x10);
- // sp[0]: number of arguments
+ __ Push(x2, x4, x3, x10);
+ // sp[0]: receiver (the hole)
// sp[1]: new.target
- // sp[2]: receiver (the hole)
+ // sp[2]: number of arguments
+ // sp[3]: allocation site
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -964,6 +894,144 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// - x1: the JS function object being called.
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp, cp, x1);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+ kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ Sub(x10, jssp, Operand(x11));
+ __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ // TODO(rmcilroy): Ensure we always have an even number of registers to
+ // allow stack to be 16 bit aligned (and remove need for jssp).
+ __ Lsr(x11, x11, kPointerSizeLog2);
+ __ PushMultipleTimes(x10, x11);
+ __ Bind(&loop_header);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ Sub(kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+ __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip0);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in x0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Drop(1, kXRegSize);
+ __ Ret();
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1291,8 +1359,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
- __ Push(argc, receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(argc);
+ __ Mov(x0, receiver);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Mov(receiver, x0);
__ Pop(argc);
@@ -1400,6 +1470,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1417,12 +1488,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ Mov(slot, Smi::FromInt(index));
- __ Mov(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
+ __ Ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1457,14 +1525,24 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
Register args = x12;
Register receiver = x14;
Register function = x15;
+ Register apply_function = x1;
+
+ // Push the vector.
+ __ Ldr(
+ apply_function,
+ FieldMemOperand(apply_function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(apply_function,
+ FieldMemOperand(apply_function,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(apply_function);
// Get the length of the arguments via a builtin call.
__ Ldr(function, MemOperand(fp, kFunctionOffset));
@@ -1518,8 +1596,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Call a builtin to convert the receiver to a regular object.
__ Bind(&convert_receiver_to_object);
- __ Push(receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(x0, receiver);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Mov(receiver, x0);
__ B(&push_receiver);
@@ -1532,8 +1611,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(receiver);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// At the end of the loop, the number of arguments is stored in 'current',
// represented as a smi.
@@ -1576,16 +1655,25 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
-
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
// Is x11 safe to use?
Register newTarget = x11;
Register args = x12;
Register function = x15;
+ Register construct_function = x1;
+
+ // Push the vector.
+ __ Ldr(construct_function,
+ FieldMemOperand(construct_function,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(construct_function,
+ FieldMemOperand(construct_function,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(construct_function);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1606,24 +1694,24 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
- // Push current limit and index, constructor & newTarget
+ // Push current limit and index & constructor function as callee.
__ Mov(x1, 0); // Initial index.
- __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
- __ Push(argc, x1, newTarget, function);
+ __ Push(argc, x1, function);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
// Use undefined feedback vector
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ Ldr(x1, MemOperand(fp, kFunctionOffset));
+ __ Ldr(x4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Leave internal frame.
}
__ Drop(kStackSize);
__ Ret();
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index e67b4fd2be..716910ea91 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -13,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -36,7 +35,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -70,7 +69,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -227,6 +226,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -246,6 +248,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -645,26 +650,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cond == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cond == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if ((cond == lt) || (cond == le)) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK((cond == gt) || (cond == ge)); // remaining cases
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ DCHECK((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
}
- __ Mov(x10, Smi::FromInt(ncr));
- __ Push(x10);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ Bind(&miss);
GenerateMiss(masm);
@@ -1731,7 +1740,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -2050,10 +2059,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2451,8 +2457,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
- DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
- JSRegExp::kDataUC16CodeOffset);
+ STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
+ JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x04
// UC16 = 0x00
@@ -2742,18 +2748,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
- Register feedback_vector,
- Register index) {
+ Register feedback_vector, Register index,
+ Register orig_construct, bool is_super) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
- __ Push(argc, function, feedback_vector, index);
+ if (is_super) {
+ __ Push(argc, function, feedback_vector, index, orig_construct);
+ } else {
+ __ Push(argc, function, feedback_vector, index);
+ }
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
- __ Pop(index, feedback_vector, function, argc);
+ if (is_super) {
+ __ Pop(orig_construct, index, feedback_vector, function, argc);
+ } else {
+ __ Pop(index, feedback_vector, function, argc);
+ }
__ SmiUntag(argc);
}
@@ -2761,17 +2775,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
- Register scratch1, Register scratch2,
- Register scratch3) {
+ Register orig_construct, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_super) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
- feedback_vector, index));
+ feedback_vector, index, orig_construct));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
+ // orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2850,7 +2866,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index);
+ feedback_vector, index, orig_construct,
+ is_super);
__ B(&done);
__ Bind(&not_array_function);
@@ -2858,7 +2875,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index);
+ feedback_vector, index, orig_construct, is_super);
__ Bind(&done);
}
@@ -2907,8 +2924,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(x1);
+ __ Mov(x0, x3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(x1);
}
__ Poke(x0, argc * kPointerSize);
@@ -2985,7 +3004,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // x3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label slow, non_function_call;
@@ -2997,7 +3017,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
&slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11);
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
+ IsSuperConstructorCall());
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
if (FLAG_pretenuring_call_new) {
@@ -3020,9 +3041,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
if (IsSuperConstructorCall()) {
- __ Mov(x4, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Peek(x3, x4);
+ __ Mov(x3, x4);
} else {
__ Mov(x3, function);
}
@@ -3299,11 +3318,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ Mov(x1, x0);
@@ -3672,7 +3690,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ Bind(&miss);
@@ -3744,9 +3762,6 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
Register stub_entry = x11;
{
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameScope scope(masm, StackFrame::INTERNAL);
Register op = x10;
Register left = x1;
@@ -3758,7 +3773,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@@ -4004,7 +4019,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// x1: result_length
@@ -4212,7 +4227,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -4655,7 +4670,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, x7);
__ Bind(&miss);
@@ -4930,7 +4945,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@@ -4999,7 +5014,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -5090,7 +5105,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@@ -5484,6 +5499,156 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = x0;
+ Register slot = x2;
+ Label slow_case;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(result, ContextMemOperand(result));
+ __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
+ __ Ret();
+
+ // Fallback to runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register value = x0;
+ Register slot = x2;
+ Register context_temp = x10;
+ Register cell = x10;
+ Register cell_details = x11;
+ Register cell_value = x12;
+ Register cell_value_map = x13;
+ Register value_map = x14;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(cell, ContextMemOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ Ldr(cell_details,
+ UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ And(cell_details, cell_details,
+ PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask);
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, &not_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+ __ Bind(&fast_heapobject_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ Mov(x11, value);
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ Bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Cmp(cell_value, value);
+ __ B(ne, &not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
+ __ B(ne, &slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData));
+ __ Check(eq, kUnexpectedValue);
+ __ Bind(&done);
+ }
+ __ Ret();
+ __ Bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, &slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ Bind(&fast_smi_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Ret();
+
+ __ Bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Cmp(cell_value_map, value_map);
+ __ B(eq, &fast_heapobject_case);
+
+ // Fall back to the runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 2d1ef57f38..c381df713d 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/simulator-arm64.h"
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index fc7bef69e9..1529c647ff 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -5,6 +5,8 @@
#ifndef V8_ARM64_CONSTANTS_ARM64_H_
#define V8_ARM64_CONSTANTS_ARM64_H_
+#include "src/base/macros.h"
+#include "src/globals.h"
// Assert that this is an LP64 system.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
@@ -762,20 +764,6 @@ enum LoadStorePairOffsetOp {
#undef LOAD_STORE_PAIR_OFFSET
};
-enum LoadStorePairNonTemporalOp {
- LoadStorePairNonTemporalFixed = 0x28000000,
- LoadStorePairNonTemporalFMask = 0x3B800000,
- LoadStorePairNonTemporalMask = 0xFFC00000,
- STNP_w = LoadStorePairNonTemporalFixed | STP_w,
- LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
- STNP_x = LoadStorePairNonTemporalFixed | STP_x,
- LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
- STNP_s = LoadStorePairNonTemporalFixed | STP_s,
- LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
- STNP_d = LoadStorePairNonTemporalFixed | STP_d,
- LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
-};
-
// Load literal.
enum LoadLiteralOp {
LoadLiteralFixed = 0x18000000,
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 8258fbfde3..bde3e4aeb9 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -4,12 +4,11 @@
// CPU specific code for arm independent of OS goes here.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
deleted file mode 100644
index 2eec4466e1..0000000000
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
- // the return from JS function sequence from
- // mov sp, fp
- // ldp fp, lr, [sp] #16
-  //   ldr ip0, [pc, #(3 * kInstructionSize)]
- // add sp, sp, ip0
- // ret
-  //   <number of parameters ...
- // ... plus one (64 bits)>
- // to a call to the debug break return code.
- // ldr ip0, [pc, #(3 * kInstructionSize)]
- // blr ip0
- // hlt kHltBadCode @ code should not return, catch if it does.
- // <debug break return code ...
- // ... entry point address (64 bits)>
-
- // The patching code must not overflow the space occupied by the return
- // sequence.
- STATIC_ASSERT(Assembler::kJSReturnSequenceInstructions >= 5);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 5);
- byte* entry =
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
-
- // The first instruction of a patched return sequence must be a load literal
- // loading the address of the debug break return code.
- patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
- // TODO(all): check the following is correct.
- // The debug break return code will push a frame and call statically compiled
- // code. By using blr, even though control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
-  // iterate over the frames to find the call to the debug break return code.
- patcher.blr(ip0);
- patcher.hlt(kHltBadCode);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
- // break slot code from
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // to a call to the debug slot code.
- // ldr ip0, [pc, #(2 * kInstructionSize)]
- // blr ip0
- // <debug break slot code ...
- // ... entry point address (64 bits)>
-
- // TODO(all): consider adding a hlt instruction after the blr as we don't
- // expect control to return here. This implies increasing
- // kDebugBreakSlotInstructions to 5 instructions.
-
- // The patching code must not overflow the space occupied by the return
- // sequence.
- STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 4);
- byte* entry =
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
-
- // The first instruction of a patched debug break slot must be a load literal
- // loading the address of the debug break slot code.
- patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
- // TODO(all): check the following is correct.
- // The debug break slot code will push a frame and call statically compiled
-  // code. By using blr, even though control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
-  // iterate over the frames to find the call to the debug break slot code.
- patcher.blr(ip0);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- Register scratch) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
- __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ Push(scratch);
-
- // Any live values (object_regs and non_object_regs) in caller-saved
- // registers (or lr) need to be stored on the stack so that their values are
- // safely preserved for a call into C code.
- //
- // Also:
- // * object_regs may be modified during the C code by the garbage
- // collector. Every object register must be a valid tagged pointer or
- // SMI.
- //
- // * non_object_regs will be converted to SMIs so that the garbage
- // collector doesn't try to interpret them as pointers.
- //
- // TODO(jbramley): Why can't this handle callee-saved registers?
- DCHECK((~kCallerSaved.list() & object_regs) == 0);
- DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- DCHECK((scratch.Bit() & object_regs) == 0);
- DCHECK((scratch.Bit() & non_object_regs) == 0);
- DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
- STATIC_ASSERT(kSmiValueSize == 32);
-
- CPURegList non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Store each non-object register as two SMIs.
- Register reg = Register(non_object_list.PopLowestIndex());
- __ Lsr(scratch, reg, 32);
- __ SmiTagAndPush(scratch, reg);
-
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
- }
-
- if (object_regs != 0) {
- __ PushXRegList(object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Mov(x0, 0); // No arguments.
- __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub stub(masm->isolate(), 1);
- __ CallStub(&stub);
-
- // Restore the register values from the expression stack.
- if (object_regs != 0) {
- __ PopXRegList(object_regs);
- }
-
- non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Load each non-object register from two SMIs.
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- Register reg = Register(non_object_list.PopHighestIndex());
- __ Pop(scratch, reg);
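-    // reg now holds the high half in its upper 32 bits; insert the low half
-    // (the payload of the smi in scratch) into its lower 32 bits.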
- __ Bfxil(reg, scratch, 32, 32);
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ Mov(scratch, after_break_target);
- __ Ldr(scratch, MemOperand(scratch));
- __ Br(scratch);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- x3 : slot in feedback array
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  // In places other than IC call sites it is expected that x0 is TOS, which
-  // is an object. This is not generally the case, so this should be used with
-  // care.
- Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -- x2 : feedback array
- // -- x3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
-
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(Assembler::DEBUG_BREAK_NOP);
- }
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  // In the places where a debug break slot is inserted, no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, x10);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, restarter_frame_function_slot);
- __ Str(xzr, MemOperand(scratch));
-
- // We do not know our frame height, but set sp based on fp.
- __ Sub(masm->StackPointer(), fp, kPointerSize);
- __ AssertStackConsistency();
-
- __ Pop(x1, fp, lr); // Function, Frame, Return address.
-
- // Load context from the function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Get function code.
- __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-
- // Re-run JSFunction, x1 is function, cp is context.
- __ Br(scratch);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 5dd2fd9cc0..c29f2d3c5e 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -231,7 +231,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
V::VisitUnallocated(instr);
} else {
- V::VisitLoadStorePairNonTemporal(instr);
+ // Nontemporals are unimplemented.
+ V::VisitUnimplemented(instr);
}
} else {
V::VisitLoadStorePairPostIndex(instr);
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index 08aab4286e..56b3e0255e 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64.h"
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index af6bcc6f4f..6140bc2818 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -33,7 +33,6 @@ namespace internal {
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
- V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.cc b/deps/v8/src/arm64/delayed-masm-arm64.cc
index 77ad79199e..e86f10262f 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64.cc
+++ b/deps/v8/src/arm64/delayed-masm-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/delayed-masm-arm64.h"
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 41a87643f2..65fb93e53c 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
+#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 232dfce5f0..fb3b692d08 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -7,8 +7,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64-inl.h"
@@ -917,25 +915,6 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form;
-
- switch (instr->Mask(LoadStorePairNonTemporalMask)) {
- case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- default: form = "(LoadStorePairNonTemporal)";
- }
- Format(instr, mnemonic, form);
-}
-
-
void Disassembler::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index 8cd3b80dbe..c6b189bf97 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
-#include "src/v8.h"
-
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index 73c678aaa6..d3dea408bd 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 963dc3e025..9e6551783d 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -63,12 +63,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
deleted file mode 100644
index 324bfb8160..0000000000
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ /dev/null
@@ -1,5578 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
-#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-
-#include "src/arm64/code-stubs-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- if (patch_site_.is_bound()) {
- DCHECK(info_emitted_);
- } else {
- DCHECK(reg_.IsNone());
- }
- }
-
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
- InstructionAccurateScope scope(masm_, 1);
- DCHECK(!info_emitted_);
- DCHECK(reg.Is64Bits());
- DCHECK(!reg.Is(csp));
- reg_ = reg;
- __ bind(&patch_site_);
- __ tbz(xzr, 0, target); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
- InstructionAccurateScope scope(masm_, 1);
- DCHECK(!info_emitted_);
- DCHECK(reg.Is64Bits());
- DCHECK(!reg.Is(csp));
- reg_ = reg;
- __ bind(&patch_site_);
- __ tbnz(xzr, 0, target); // Never taken before patched.
- }
-
- void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
- UseScratchRegisterScope temps(masm_);
- Register temp = temps.AcquireX();
- __ Orr(temp, reg1, reg2);
- EmitJumpIfNotSmi(temp, target);
- }
-
- void EmitPatchInfo() {
- Assembler::BlockPoolsScope scope(masm_);
- InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
- Register reg_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// - x1: the JS function object being called (i.e. ourselves).
-// - cp: our context.
-// - fp: our caller's frame pointer.
-// - jssp: stack pointer.
-// - lr: return address.
-//
-// The function builds a JS frame. See JavaScriptFrameConstants in
-// frames-arm64.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- profiling_counter_ = isolate()->factory()->NewCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ Function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ Debug("stop-at", __LINE__, BREAK);
- }
-#endif
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
- Label ok;
- int receiver_offset = info->scope()->num_parameters() * kXRegSize;
- __ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
- }
-
-
- // Open a frame scope to indicate that there is a frame on the stack.
- // The MANUAL indicates that the scope shouldn't actually generate code
- // to set up the frame because we do it manually below.
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- // This call emits the following sequence in a way that can be patched for
- // code ageing support:
- // Push(lr, fp, cp, x1);
- // Add(fp, jssp, 2 * kPointerSize);
- info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
-
- // Reserve space on the stack for locals.
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
-
- if (locals_count > 0) {
- if (locals_count >= 128) {
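-        // With this many locals, check against the real stack limit before
-        // reserving the space.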
- Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ Sub(x10, jssp, locals_count * kPointerSize);
- __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
- __ B(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- __ Bind(&ok);
- }
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size) {
- __ PushMultipleTimes(x10 , locals_count);
- } else {
- const int kMaxPushes = 32;
- if (locals_count >= kMaxPushes) {
- int loop_iterations = locals_count / kMaxPushes;
- __ Mov(x3, loop_iterations);
- Label loop_header;
- __ Bind(&loop_header);
- // Do pushes.
- __ PushMultipleTimes(x10 , kMaxPushes);
- __ Subs(x3, x3, 1);
- __ B(ne, &loop_header);
- }
- int remaining = locals_count % kMaxPushes;
- // Emit the remaining pushes.
- __ PushMultipleTimes(x10 , remaining);
- }
- }
- }
-
- bool function_in_register_x1 = true;
-
- if (info->scope()->num_heap_slots() > 0) {
- // Argument to NewContext is the function, which is still in x1.
- Comment cmnt(masm_, "[ Allocate context");
- bool need_write_barrier = true;
- int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (info->scope()->is_script_scope()) {
- __ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
- __ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register_x1 = false;
- // Context is returned in x0. It replaces the context passed to us.
- // It's saved in the stack and kept live in cp.
- __ Mov(cp, x0);
- __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ Ldr(x10, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ Str(x10, target);
-
- // Update the write barrier.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()), x10,
- x11, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- }
-
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_x1) {
- __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register again, so keep it marked as such.
- }
- SetVar(this_function_var, x1, x0, x2);
- }
-
- Variable* new_target_var = scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- Label check_frame_marker;
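-    // If the calling frame is an arguments adaptor frame, skip over it to
-    // reach the frame whose marker we want to check.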
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
-
- Label non_construct_frame, done;
-
- __ B(ne, &non_construct_frame);
- __ Ldr(x0,
- MemOperand(x2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ B(&done);
-
- __ Bind(&non_construct_frame);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-
- __ Bind(&done);
-
- SetVar(new_target_var, x0, x2, x3);
- }
-
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(x2, Smi::FromInt(num_parameters));
- __ Mov(x1, Smi::FromInt(rest_index));
- __ Mov(x0, Smi::FromInt(language_mode()));
- __ Push(x3, x2, x1, x0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, x0, x1, x2);
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register_x1) {
- // Load this again, if it's used by the local context below.
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ Mov(x3, x1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(x1, Smi::FromInt(num_parameters));
- __ Push(x3, x2, x1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
-
- SetVar(arguments, x0, x1, x2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- {
- Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- DCHECK(jssp.Is(__ StackPointer()));
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ Bind(&ok);
- }
-
- {
- Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
- DCHECK(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emission of the pools, so they don't get emitted in the middle
- // of the back edge table.
- masm()->CheckVeneerPool(true, false);
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Mov(x0, Smi::FromInt(0));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ Mov(x2, Operand(profiling_counter_));
- __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
- __ Subs(x3, x3, Smi::FromInt(delta));
- __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->is_debug()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ Mov(x2, Operand(profiling_counter_));
- __ Mov(x3, Smi::FromInt(reset_value));
- __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- DCHECK(jssp.Is(__ StackPointer()));
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting back edge code.
- Assembler::BlockPoolsScope block_const_pool(masm_);
- Label ok;
-
- DCHECK(back_edge_target->is_bound());
- // We want to do a round rather than a floor of distance/kCodeSizeMultiplier
- // to reduce the absolute error due to the integer division. To do that,
- // we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
- // the result).
- int distance =
- static_cast<int>(masm_->SizeOfCodeGeneratedSince(back_edge_target) +
- kCodeSizeMultiplier / 2);
- int weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- EmitProfilingCounterDecrement(weight);
- __ B(pl, &ok);
- __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ Bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
-
- if (return_label_.is_bound()) {
- __ B(&return_label_);
-
- } else {
- __ Bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in x0.
- __ Push(result_register());
- __ CallRuntime(Runtime::kTraceExit, 1);
- DCHECK(x0.Is(result_register()));
- }
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ B(pl, &ok);
- __ Push(x0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ Pop(x0);
- EmitProfilingCounterReset();
- __ Bind(&ok);
-
- // Make sure that the constant pool is not emitted inside of the return
- // sequence. This sequence can get patched when the debugger is used. See
- // debug-arm64.cc:BreakLocation::SetDebugBreakAtReturn().
- {
- InstructionAccurateScope scope(masm_,
- Assembler::kJSReturnSequenceInstructions);
- SetReturnPosition(function());
- __ RecordJSReturn();
- // This code is generated using Assembler methods rather than Macro
- // Assembler methods because it will be patched later on, and so the size
- // of the generated code must be consistent.
- const Register& current_sp = __ StackPointer();
-      // Nothing ensures 16-byte alignment here.
- DCHECK(!current_sp.Is(csp));
- __ mov(current_sp, fp);
- int no_frame_start = masm_->pc_offset();
- __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
- // Drop the arguments and receiver and return.
- // TODO(all): This implementation is overkill as it supports 2**31+1
- // arguments, consider how to improve it without creating a security
- // hole.
- __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
- __ add(current_sp, current_sp, ip0);
- __ ret();
- int32_t arg_count = info_->scope()->num_parameters() + 1;
- __ dc64(kXRegSize * arg_count);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- // Root values have no side effects.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ B(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ Mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ Mov(result_register(), Operand(lit));
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ B(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else {
- if (true_label_ != fall_through_) __ B(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else {
- if (true_label_ != fall_through_) __ B(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ Mov(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ Poke(reg, 0);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Mov(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == materialize_false);
- __ Bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ Bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ B(&done);
- __ Bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ Bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ Bind(materialize_true);
- __ LoadRoot(x10, Heap::kTrueValueRootIndex);
- __ B(&done);
- __ Bind(materialize_false);
- __ LoadRoot(x10, Heap::kFalseValueRootIndex);
- __ Bind(&done);
- __ Push(x10);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == true_label_);
- DCHECK(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(x10, value_root_index);
- __ Push(x10);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) {
- __ B(true_label_);
- }
- } else {
- if (false_label_ != fall_through_) {
- __ B(false_label_);
- }
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
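-  // The ToBooleanStub leaves zero in the result register for false and a
-  // non-zero value for true.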
- __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
-}
-
-
-// If (cond), branch to if_true.
-// If (!cond), branch to if_false.
-// fall_through is used as an optimization in cases where only one branch
-// instruction is necessary.
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ B(cond, if_true);
- } else if (if_true == fall_through) {
- DCHECK(if_false != fall_through);
- __ B(NegateCondition(cond), if_false);
- } else {
- __ B(cond, if_true);
- __ B(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kXRegSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextMemOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ Ldr(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- DCHECK(!AreAliased(src, scratch0, scratch1));
- MemOperand location = VarOperand(var, scratch0);
- __ Str(src, location);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- // scratch0 contains the correct context.
- __ RecordWriteContextSlot(scratch0, static_cast<int>(location.offset()),
- src, scratch1, kLRHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- // TODO(all): Investigate to see if there is something to work on here.
- Label skip;
- if (should_normalize) {
- __ B(&skip);
- }
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, NULL);
- __ Bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
- __ Check(ne, kDeclarationInWithContext);
- __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, kDeclarationInCatchContext);
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
-
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- break;
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Str(x10, StackOperand(variable));
- }
- break;
-
- case VariableLocation::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Str(x10, ContextMemOperand(cp, variable->index()));
- // No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ Mov(x2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
- : NONE;
- __ Mov(x1, Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, x2, x1, x0);
- } else {
- // Pushing 0 (xzr) indicates no initial value.
- __ Push(cp, x2, x1, xzr);
- }
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
- // Check for stack overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- Comment cmnt(masm_, "[ Function Declaration");
- VisitForAccumulatorValue(declaration->fun());
- __ Str(result_register(), StackOperand(variable));
- break;
- }
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ Function Declaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ Str(result_register(), ContextMemOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- x2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Function Declaration");
- __ Mov(x2, Operand(variable->name()));
- __ Mov(x1, Smi::FromInt(NONE));
- __ Push(cp, x2, x1);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ Mov(x11, Operand(pairs));
- Register flags = xzr;
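-  // Only materialize the flags smi when it is non-zero; otherwise push the
-  // zero register directly.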
- if (Smi::FromInt(DeclareGlobalsFlags())) {
- flags = x10;
- __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
- }
- __ Push(cp, x11, flags);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ Bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ Peek(x1, 0); // Switch value.
-
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- __ B(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ B(clause->body_target());
- __ Bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
- CallIC(ic, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- Label skip;
- __ B(&skip);
- PrepareForBailout(clause, TOS_REG);
- __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
- __ Drop(1);
- __ B(clause->body_target());
- __ Bind(&skip);
-
- __ Cbnz(x0, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ B(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ Bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ B(nested_statement.break_label());
- } else {
- __ B(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ Bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ Bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt, SKIP_BREAK);
-
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
-
- // TODO(all): This visitor probably needs better comments and a revisit.
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionAsStatementPosition(stmt->enumerable());
- VisitForAccumulatorValue(stmt->enumerable());
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
- Register null_value = x15;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Cmp(x0, null_value);
- __ B(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(x0, &convert);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
- __ Bind(&convert);
- __ Push(x0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
- __ Push(x0);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ B(&use_cache);
-
- // Get the set of properties to enumerate.
- __ Bind(&call_runtime);
- __ Push(x0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array, no_descriptors;
- __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
-
- // We got a map in register x0. Get the enumeration cache from it.
- __ Bind(&use_cache);
-
- __ EnumLengthUntagged(x1, x0);
- __ Cbz(x1, &no_descriptors);
-
- __ LoadInstanceDescriptors(x0, x2);
- __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
- __ Ldr(x2,
- FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ SmiTag(x1);
- // Map, enumeration cache, enum cache length, zero (both last as smis).
- __ Push(x0, x2, x1, xzr);
- __ B(&loop);
-
- __ Bind(&no_descriptors);
- __ Drop(1);
- __ B(&exit);
-
- // We got a fixed array in register x0. Iterate through that.
- __ Bind(&fixed_array);
-
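-  // We take the generic path, so mark the for-in feedback slot megamorphic.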
- __ LoadObject(x1, FeedbackVector());
- __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
- __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
- __ Peek(x10, 0); // Get enumerated object.
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- // TODO(all): similar check was done already. Can we avoid it here?
- __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
- DCHECK(Smi::FromInt(0) == 0);
- __ CzeroX(x1, le); // Zero indicates proxy.
- __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
- // Smi and array, fixed array length (as smi) and initial index.
- __ Push(x1, x0, x2, xzr);
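-  // Both branches leave the same layout on the stack, from top to bottom:
-  // current index (smi zero), length (smi), fixed array or enum cache,
-  // smi check-flag or map, and the enumerable object itself.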
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ Bind(&loop);
- SetExpressionAsStatementPosition(stmt->each());
-
- // Load the current count to x0, load the length to x1.
- __ PeekPair(x0, x1, 0);
- __ Cmp(x0, x1); // Compare to the array length.
- __ B(hs, loop_statement.break_label());
-
-  // Get the current entry of the array into register x3.
- __ Peek(x10, 2 * kXRegSize);
- __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
- __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
-
-  // Get the expected map from the stack or a smi in the
-  // permanent slow case into register x2.
- __ Peek(x2, 3 * kXRegSize);
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ Peek(x1, 4 * kXRegSize);
- __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Cmp(x11, x2);
- __ B(eq, &update_each);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbz(x2, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ Push(x1, x3);
- __ CallRuntime(Runtime::kForInFilter, 2);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ Mov(x3, x0);
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
- loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register x3.
- __ Bind(&update_each);
- __ Mov(result_register(), x3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ Bind(loop_statement.continue_label());
- // TODO(all): We could use a callee saved register to avoid popping.
- __ Pop(x0);
- __ Add(x0, x0, Smi::FromInt(1));
- __ Push(x0);
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ B(&loop);
-
- // Remove the pointers stored on the stack.
- __ Bind(loop_statement.break_label());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ Bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new space for
- // nested functions that don't need literals cloning. If we're running with
- // the --always-opt or the --prepare-always-opt flag, we need to use the
- // runtime function so that the new function we are creating here gets a
- // chance to have its code optimized and doesn't just get a copy of the
- // existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ Mov(x2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Mov(x11, Operand(info));
- __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, x11, x10);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ Peek(StoreDescriptor::ReceiverRegister(), 0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = x10;
- Register temp = x11;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- }
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- __ Mov(next, current);
-
- __ Bind(&loop);
- // Terminate at native context.
- __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ B(&loop);
- __ Bind(&fast);
- }
-
-  // All extension objects were empty and it is safe to use the normal global
-  // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = x10;
- Register temp = x11;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- }
- __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
-
- // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
- __ B(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ B(done);
- }
-}
-
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
- // Record position before possible IC call.
- SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
- context()->Plug(x0);
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        // var->location() == LOOKUP always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
- } else {
-          // Check that we always have a valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(x0, var);
- Label done;
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ Bind(&done);
- } else {
-            // Uninitialized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- }
- context()->Plug(x0);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case VariableLocation::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
- __ Bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ Mov(x1, Operand(var->name()));
- __ Push(cp, x1); // Context and name.
- Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
- __ Bind(&done);
- context()->Plug(x0);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // x5 = materialized value (RegExp literal)
- // x4 = JS function, literals array
- // x3 = literal index
- // x2 = RegExp pattern
- // x1 = RegExp flags
- // x0 = RegExp literal clone
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ Ldr(x5, FieldMemOperand(x4, literal_offset));
- __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in x0.
- __ Mov(x3, Smi::FromInt(expr->literal_index()));
- __ Mov(x2, Operand(expr->pattern()));
- __ Mov(x1, Operand(expr->flags()));
- __ Push(x4, x3, x2, x1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Mov(x5, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x10, Smi::FromInt(size));
- __ Push(x5, x10);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x5);
-
- __ Bind(&allocated);
- // After this, registers are used as follows:
- // x0: Newly allocated regexp.
- // x5: Materialized regexp.
- // x10, x11, x12: temps.
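-  // Copy the materialized regexp's header and in-object fields into the
-  // newly allocated object.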
- __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(x10, Heap::kNullValueRootIndex);
- __ Push(x10);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Handle<FixedArray> constant_properties = expr->constant_properties();
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
- __ Mov(x2, Smi::FromInt(expr->literal_index()));
- __ Mov(x1, Operand(constant_properties));
- int flags = expr->ComputeFlags();
- __ Mov(x0, Smi::FromInt(flags));
- if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in x0.
- bool result_saved = false;
-
- AccessorTable accessor_table(zone());
- int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key()->AsLiteral();
- Expression* value = property->value();
- if (!result_saved) {
- __ Push(x0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- DCHECK(StoreDescriptor::ValueRegister().is(x0));
- __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
- __ Peek(StoreDescriptor::ReceiverRegister(), 0);
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
-
- if (NeedsHomeObject(value)) {
- __ Mov(StoreDescriptor::ReceiverRegister(), x0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ Peek(StoreDescriptor::ValueRegister(), 0);
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
- }
- } else {
- VisitForEffect(value);
- }
- break;
- }
- __ Peek(x0, 0);
- __ Push(x0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
- __ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
- __ Push(x0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- DCHECK(property->emit_store());
- // Duplicate receiver on stack.
- __ Peek(x0, 0);
- __ Push(x0);
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- break;
- case ObjectLiteral::Property::GETTER:
- if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
- }
- break;
- case ObjectLiteral::Property::SETTER:
- if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
- }
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ Peek(x10, 0); // Duplicate receiver.
- __ Push(x10);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
- EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
- __ Mov(x10, Smi::FromInt(NONE));
- __ Push(x10);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
- }
-
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- __ Push(x0); // Save result on stack
- result_saved = true;
- }
-
- __ Peek(x10, 0); // Duplicate receiver.
- __ Push(x10);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
- VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
- } else {
- __ Drop(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
- break;
-
- case ObjectLiteral::Property::SETTER:
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
- break;
- }
- }
- }
-
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ Peek(x0, 0);
- __ Push(x0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(x0);
- }
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- expr->BuildConstantElements(isolate());
- Handle<FixedArray> constant_elements = expr->constant_elements();
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
- __ Mov(x2, Smi::FromInt(expr->literal_index()));
- __ Mov(x1, Operand(constant_elements));
- if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ Mov(x0, Smi::FromInt(expr->ComputeFlags()));
- __ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
- } else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
-
- bool result_saved = false; // Is the result saved to the stack?
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
-
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- if (!result_saved) {
- __ Mov(x1, Smi::FromInt(expr->literal_index()));
- __ Push(x0, x1);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ Peek(x6, kPointerSize); // Copy of array literal.
- __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
- __ Str(result_register(), FieldMemOperand(x1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(x1, offset, result_register(), x10,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ Mov(x3, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
- }
-
-  // In case the array literal contains spread expressions it has two parts. The
-  // first part is the "static" array which has a literal index and is handled
-  // above. The second part is the part after the first spread expression
-  // (inclusive) and these elements get appended to the array. Note that the
-  // number of elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- __ Drop(1); // literal index
- __ Pop(x0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- __ Push(x0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
- }
-
- if (result_saved) {
- __ Drop(1); // literal index
- context()->PlugTOS();
- } else {
- context()->Plug(x0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
-
- Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
-
- Property* property = expr->target()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the register.
- VisitForStackValue(property->obj());
- __ Peek(LoadDescriptor::ReceiverRegister(), 0);
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
- if (expr->is_compound()) {
- const Register scratch = x10;
- __ Peek(scratch, kPointerSize);
- __ Push(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- __ Push(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = x10;
- const Register scratch2 = x11;
- __ Peek(scratch1, 2 * kPointerSize);
- __ Peek(scratch2, kPointerSize);
- __ Push(scratch1, scratch2, result_register());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadDescriptor::NameRegister(), 0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ Push(x0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- SetExpressionPosition(expr);
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(x0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(x0);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- // Call keyed load IC. It has arguments key and receiver in x0 and x1.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, both_smis, stub_call;
-
- // Get the arguments.
- Register left = x1;
- Register right = x0;
- Register result = x0;
- __ Pop(left);
-
- // Perform combined smi check on both operands.
- __ Orr(x10, left, right);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(x10, &both_smis);
-
- __ Bind(&stub_call);
-
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
- {
- Assembler::BlockPoolsScope scope(masm_);
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ B(&done);
-
- __ Bind(&both_smis);
- // Smi case. This code works in the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- // TODO(all): That doesn't exist any more. Where are the comments?
- //
- // The set of operations that needs to be supported here is controlled by
- // FullCodeGenerator::ShouldInlineSmiCase().
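-  // On arm64 a smi carries its 32-bit payload in the upper word
-  // (kSmiShift == 32), so the shift cases below extract the 5-bit shift
-  // amount starting at bit kSmiShift rather than at bit 0.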
- switch (op) {
- case Token::SAR:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Asr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsl(result, left, right);
- break;
- case Token::SHR:
- // If `left >>> right` >= 0x80000000, the result is not representable in a
- // signed 32-bit smi.
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsr(x10, left, right);
- __ Tbnz(x10, kXSignBit, &stub_call);
- __ Bic(result, x10, kSmiShiftMask);
- break;
- case Token::ADD:
- __ Adds(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::SUB:
- __ Subs(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::MUL: {
- Label not_minus_zero, done;
- STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
- STATIC_ASSERT(kSmiTag == 0);
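-      // Both operands carry their payload in the upper 32 bits, so the high
-      // half of the 128-bit product (Smulh) is exactly the untagged 64-bit
-      // product of the two smi values.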
- __ Smulh(x10, left, right);
- __ Cbnz(x10, &not_minus_zero);
- __ Eor(x11, left, right);
- __ Tbnz(x11, kXSignBit, &stub_call);
- __ Mov(result, x10);
- __ B(&done);
- __ Bind(&not_minus_zero);
- __ Cls(x11, x10);
- __ Cmp(x11, kXRegSizeInBits - kSmiShift);
- __ B(lt, &stub_call);
- __ SmiTag(result, x10);
- __ Bind(&done);
- break;
- }
- case Token::BIT_OR:
- __ Orr(result, left, right);
- break;
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_XOR:
- __ Eor(result, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ Pop(x1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
- JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
- {
- Assembler::BlockPoolsScope scope(masm_);
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
- // Constructor is in x0.
- DCHECK(lit != NULL);
- __ push(x0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = x1;
- __ Ldr(scratch,
- FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
- __ Push(scratch);
-
- for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- if (property->is_static()) {
- __ Peek(scratch, kPointerSize); // constructor
- } else {
- __ Peek(scratch, 0); // prototype
- }
- __ Push(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read-only. We handle the non-computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read-only property, we special-case it here so
-    // that the check is not repeated for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
- __ Push(x0);
- }
-
- VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
- break;
-
- case ObjectLiteral::Property::GETTER:
- __ Mov(x0, Smi::FromInt(DONT_ENUM));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
- break;
-
- case ObjectLiteral::Property::SETTER:
- __ Mov(x0, Smi::FromInt(DONT_ENUM));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
-
- Property* prop = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
- break;
- }
- case NAMED_PROPERTY: {
- __ Push(x0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
- // this copy.
- __ Mov(StoreDescriptor::ReceiverRegister(), x0);
- __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- __ Push(x0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; x0: home_object
- Register scratch = x10;
- Register scratch2 = x11;
- __ mov(scratch, result_register()); // home_object
- __ Peek(x0, kPointerSize); // value
- __ Peek(scratch2, 0); // this
- __ Poke(scratch2, kPointerSize); // this
- __ Poke(scratch, 0); // home_object
- // stack: this, home_object; x0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- __ Push(x0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = x10;
- Register scratch2 = x11;
- __ Peek(scratch2, 2 * kPointerSize); // value
- // stack: value, this, home_object; x0: key, x11: value
- __ Peek(scratch, kPointerSize); // this
- __ Poke(scratch, 2 * kPointerSize);
- __ Peek(scratch, 0); // home_object
- __ Poke(scratch, kPointerSize);
- __ Poke(x0, 0);
- __ Move(x0, scratch2);
- // stack: this, home_object, key; x0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
- case KEYED_PROPERTY: {
- __ Push(x0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Mov(StoreDescriptor::NameRegister(), x0);
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::ValueRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ Str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ Mov(x10, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
- ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
- if (var->IsUnallocatedOrGlobalSlot()) {
- // Global var, const, or let.
- __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ Bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ Bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Mov(x11, Operand(var->name()));
- __ Mov(x10, Smi::FromInt(language_mode()));
- // jssp[0] : mode.
- // jssp[8] : name.
- // jssp[16] : context.
- // jssp[24] : value.
- __ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
- __ Ldr(x10, location);
- __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
-
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Mov(x1, Operand(var->name()));
- __ Push(x0, cp, x1);
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ Bind(&skip);
- }
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
- }
- // Silently ignore store in sloppy mode.
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- DCHECK(prop != NULL);
- DCHECK(prop->key()->IsLiteral());
-
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // x0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- __ Push(key->value());
- __ Push(x0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // x0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- __ Push(x0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
- // Assignment to a property, using a keyed store IC.
-
- // TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
- DCHECK(StoreDescriptor::ValueRegister().is(x0));
-
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), x0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), x0);
- __ Pop(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
-
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
- // Get the target function.
- if (call_type == CallICState::FUNCTION) {
- { StackValueContext context(this);
- EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
- }
- // Push undefined as receiver. This is patched in the method prologue if it
- // is a sloppy mode method.
- {
- UseScratchRegisterScope temps(masm_);
- Register temp = temps.AcquireX();
- __ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
- __ Push(temp);
- }
- } else {
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- DCHECK(!callee->AsProperty()->IsSuperAccess());
- __ Peek(LoadDescriptor::ReceiverRegister(), 0);
- EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
- }
-
- EmitCall(expr, call_type);
-}
-
-
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
-
- // Load the function from the receiver.
- const Register scratch = x10;
- SuperPropertyReference* super_ref =
- callee->AsProperty()->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- __ Push(x0);
- __ Peek(scratch, kPointerSize);
- __ Push(x0, scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  //  - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
-
- // Replace home_object with target function.
- __ Poke(x0, kPointerSize);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- Expression* callee = expr->expression();
-
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- __ Peek(LoadDescriptor::ReceiverRegister(), 0);
- __ Move(LoadDescriptor::NameRegister(), x0);
- EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
-
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- // Load the function from the receiver.
- const Register scratch = x10;
- SuperPropertyReference* super_ref =
- callee->AsProperty()->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- __ Push(x0);
- __ Peek(scratch, kPointerSize);
- __ Push(x0, scratch);
- VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-
- // Replace home_object with target function.
- __ Poke(x0, kPointerSize);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- SetExpressionPosition(expr);
-
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
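-  // The CallIC expects the callee (which sits below the receiver and the
-  // arguments on the stack) in x1 and the feedback slot index, as a smi,
-  // in x3.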
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ Peek(x1, (arg_count + 1) * kXRegSize);
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
- // Prepare to push a copy of the first argument or undefined if it doesn't
- // exist.
- if (arg_count > 0) {
- __ Peek(x9, arg_count * kXRegSize);
- } else {
- __ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
- }
-
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Prepare to push the language mode.
- __ Mov(x11, Smi::FromInt(language_mode()));
-  // Prepare to push the start position of the scope the call resides in.
- __ Mov(x12, Smi::FromInt(scope()->start_position()));
-
- // Push.
- __ Push(x9, x10, x11, x12);
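-  // Together with the copy of the callee pushed by VisitCall, these form
-  // the five arguments to Runtime::kResolvePossiblyDirectEval.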
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
-}
-
-
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(x1, this_var);
- Label uninitialized_this;
- __ JumpIfRoot(x1, Heap::kTheHoleValueRootIndex, &uninitialized_this);
- __ Mov(x0, Operand(this_var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ Bind(&slow);
- // Call the runtime to find the function to call (returned in x0)
- // and the object holding it (returned in x1).
- __ Mov(x10, Operand(callee->name()));
- __ Push(context_register(), x10);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(x0, x1); // Receiver, function.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ B(&call);
- __ Bind(&done);
- // Push function.
- // The receiver is implicitly the global receiver. Indicate this
-      // by passing undefined to the call function stub.
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x0, x1);
- __ Bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
-    // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // Touch up the stack with the resolved function.
- __ Poke(x0, (arg_count + 1) * kPointerSize);
-
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-
- // Record source position for debugger.
- SetExpressionPosition(expr);
-
- // Call the evaluated function.
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kXRegSize);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
- // Emit function call.
- EmitCall(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- DCHECK(!expr->expression()->IsSuperPropertyReference());
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetExpressionPosition(expr);
-
- // Load function and argument count into x1 and x0.
- __ Mov(x0, arg_count);
- __ Peek(x1, arg_count * kXRegSize);
-
- // Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ LoadObject(x2, FeedbackVector());
- __ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
-
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetExpressionPosition(expr);
-
- // Load function and argument count into x1 and x0.
- __ Mov(x0, arg_count);
- __ Peek(x1, arg_count * kXRegSize);
-
- // Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ LoadObject(x2, FeedbackVector());
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
-
- RecordJSReturnSite(expr);
-
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tbnz(x11, Map::kIsUndetectable, if_false);
- __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(lt, if_false);
- __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tst(x11, 1 << Map::kIsUndetectable);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Register object = x0;
- __ AssertNotSmi(object);
-
- Register map = x10;
- Register bitfield2 = x11;
- __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- Register props = x12;
- Register props_map = x12;
- Register hash_table_map = x13;
- __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
- __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
- __ Cmp(props_map, hash_table_map);
- __ B(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if found.
- // Since we omit an enumeration index check, if it is added via a transition
- // that shares its descriptor array, this is a false positive.
- Label loop, done;
-
- // Skip loop if no descriptors are valid.
- Register descriptors = x12;
- Register descriptors_length = x13;
- __ NumberOfOwnDescriptors(descriptors_length, map);
- __ Cbz(descriptors_length, &done);
-
- __ LoadInstanceDescriptors(map, descriptors);
-
- // Calculate the end of the descriptor array.
- Register descriptors_end = x14;
- __ Mov(x15, DescriptorArray::kDescriptorSize);
- __ Mul(descriptors_length, descriptors_length, x15);
- // Calculate location of the first key name.
- __ Add(descriptors, descriptors,
- DescriptorArray::kFirstOffset - kHeapObjectTag);
- // Calculate the end of the descriptor array.
- __ Add(descriptors_end, descriptors,
- Operand(descriptors_length, LSL, kPointerSizeLog2));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- Register valueof_string = x1;
- int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
- __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
- __ Bind(&loop);
- __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
- __ Cmp(x15, valueof_string);
- __ B(eq, if_false);
- __ Cmp(descriptors, descriptors_end);
- __ B(ne, &loop);
-
- __ Bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
-
- __ Bind(&skip_lookup);
-
-  // If a valueOf property is not found on the object, check that its prototype
-  // is the unmodified String prototype. If not, the result is false.
- Register prototype = x1;
- Register global_idx = x2;
- Register native_context = x2;
- Register string_proto = x3;
- Register proto_map = x4;
- __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ JumpIfSmi(prototype, if_false);
- __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
- __ Ldr(global_idx, GlobalObjectMemOperand());
- __ Ldr(native_context,
- FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
- __ Ldr(string_proto,
- ContextMemOperand(native_context,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Cmp(proto_map, string_proto);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Only a HeapNumber can be -0.0, so return false if we have something else.
- __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
-
- // Test the bit pattern.
- __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
- __ Cmp(x10, 1); // Set V on 0x8000000000000000.
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(vs, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- Register map = x10;
- Register type_reg = x11;
- __ Ldr(map, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ Cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Pop(x1);
- __ Cmp(x0, x1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in x1.
- VisitForAccumulatorValue(args->at(0));
- __ Mov(x1, x0);
- __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
- __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ Bind(&exit);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitClassOf");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(x0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
- // x10: object's map.
- // x11: object's type.
- __ B(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ B(eq, &function);
-
- __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ B(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- Register instance_type = x14;
- __ GetMapConstructor(x12, x10, x13, instance_type);
- __ Cmp(instance_type, JS_FUNCTION_TYPE);
- __ B(ne, &non_function_constructor);
-
- // x12 now contains the constructor function. Grab the
- // instance class name from there.
- __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x0,
- FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
- __ B(&done);
-
- // Functions have class 'Function'.
- __ Bind(&function);
- __ LoadRoot(x0, Heap::kFunction_stringRootIndex);
- __ B(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ Bind(&non_function_constructor);
- __ LoadRoot(x0, Heap::kObject_stringRootIndex);
- __ B(&done);
-
- // Non-JS objects have class null.
- __ Bind(&null);
- __ LoadRoot(x0, Heap::kNullValueRootIndex);
-
- // All done.
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitValueOf");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(x0, &done);
- // If the object is not a value type, return the object.
- __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
- __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_DATE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = x0;
- Register result = x0;
- Register stamp_addr = x10;
- Register stamp_cache = x11;
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(stamp_addr, stamp);
- __ Ldr(stamp_addr, MemOperand(stamp_addr));
- __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(stamp_addr, stamp_cache);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ Bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- one_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strb(value, MemOperand(scratch, index));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- two_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strh(value, MemOperand(scratch, index, LSL, 1));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the MathPow stub.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ Pop(x1);
- // x0 = value.
- // x1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(x1, &done);
-
- // If the object is not a value type, return the value.
- __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
-
- // Store the value.
- __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ Mov(x10, x0);
- __ RecordWriteField(
- x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into x0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- Register code = x0;
- Register result = x1;
-
- StringCharFromCodeGenerator generator(code, result);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = x1;
- Register index = x0;
- Register result = x3;
-
- __ Pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- __ Bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ B(&done);
-
- __ Bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = x1;
- Register index = x0;
- Register result = x0;
-
- __ Pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- x3,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- __ Bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&need_conversion);
- // Move smi zero into the result register, which will trigger conversion.
- __ Mov(result, Smi::FromInt(0));
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(x1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(x0, &runtime);
- __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
-
- // InvokeFunction requires the function in x1. Move it in there.
- __ Mov(x1, x0);
- ParameterCount count(arg_count);
- __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ B(&done);
-
- __ Bind(&runtime);
- __ Push(x0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // new.target
- VisitForStackValue(args->at(0));
-
- // .this_function
- VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ Ldr(x11, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x12, MemOperand(x11, StandardFrameConstants::kContextOffset));
- __ Cmp(x12, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ Mov(x0, Operand(0));
- __ B(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ Ldr(x1, MemOperand(x11, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(x1, x1);
-
- __ Mov(x0, x1);
-
- // Get arguments pointer in x11.
- __ Add(x11, x11, Operand(x1, LSL, kPointerSizeLog2));
- __ Add(x11, x11, StandardFrameConstants::kCallerSPOffset);
- Label loop;
- __ bind(&loop);
- // Pre-decrement x11 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Ldr(x10, MemOperand(x11, -kPointerSize, PreIndex));
- __ Push(x10);
- __ Sub(x1, x1, Operand(1));
- __ Cbnz(x1, &loop);
- }
-
- __ bind(&args_set_up);
- __ Peek(x1, Operand(x0, LSL, kPointerSizeLog2));
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
-
- context()->Plug(result_register());
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(x1, x2);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- context()->Plug(x0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = x0;
- Register cache = x1;
- __ Ldr(cache, GlobalObjectMemOperand());
- __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ Ldr(cache, ContextMemOperand(cache,
- Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ Ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done;
- __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
- JSFunctionResultCache::kFingerOffset));
- __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
-
- // Load the key and data from the cache.
- __ Ldp(x2, x3, MemOperand(x3));
-
- __ Cmp(key, x2);
- __ CmovX(x0, x3, eq);
- __ B(eq, &done);
-
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x10, String::kContainsCachedArrayIndexMask);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(x0);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ IndexFromHash(x10, x0);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
-
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- Register array = x0;
- Register result = x0;
- Register elements = x1;
- Register element = x2;
- Register separator = x3;
- Register array_length = x4;
- Register result_pos = x5;
- Register map = x6;
- Register string_length = x10;
- Register elements_end = x11;
- Register string = x12;
- Register scratch1 = x13;
- Register scratch2 = x14;
- Register scratch3 = x7;
- Register separator_length = x15;
-
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
-
- // The separator operand is on the stack.
- __ Pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(map, scratch1, &bailout);
-
- // If the array has length zero, return the empty string.
- // Load and untag the length of the array.
- // It is an unsigned value, so we can skip sign extension.
- // We assume little endianness.
- __ Ldrsw(array_length,
- UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
- __ Cbnz(array_length, &non_trivial_array);
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&non_trivial_array);
- // Get the FixedArray containing array's elements.
- __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths.
- __ Mov(string_length, 0);
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (not smi).
- // element: Current array element.
- // elements_end: Array end.
- if (FLAG_debug_code) {
- __ Cmp(array_length, 0);
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ Bind(&loop);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
- __ Ldrsw(scratch1,
- UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ Adds(string_length, string_length, scratch1);
- __ B(vs, &bailout);
- __ Cmp(element, elements_end);
- __ B(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ Cmp(array_length, 1);
- __ B(ne, &not_size_one_array);
- __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ B(&done);
-
- __ Bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array (not smi).
- // string_length: Sum of string lengths (not smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string.
- // Load the separator length as untagged.
- // We assume little endianness, and that the length is positive.
- __ Ldrsw(separator_length,
- UntagSmiFieldMemOperand(separator,
- SeqOneByteString::kLengthOffset));
- __ Sub(string_length, string_length, separator_length);
- __ Umaddl(string_length, array_length.W(), separator_length.W(),
- string_length);
-
- // Get first element in the array.
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array (not smi).
- __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
- &bailout);
-
-  // Prepare for looping. Set up elements_end to the end of the array. Set
-  // result_pos to the position in the result where the first character will
-  // be written.
- // TODO(all): useless unless AllocateOneByteString trashes the register.
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Check the length of the separator.
- __ Cmp(separator_length, 1);
- __ B(eq, &one_char_separator);
- __ B(gt, &long_separator);
-
- // Empty separator case
- __ Bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &empty_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // One-character separator case
- __ Bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ B(&one_char_separator_loop_entry);
-
- __ Bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ Bind(&one_char_separator_loop_entry);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ Bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- // TODO(all): hoist next two instructions.
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(separator, String::kLengthOffset));
- __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
-
- __ Bind(&long_separator);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &long_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- __ Bind(&bailout);
- // Returning undefined will force slower code to handle it.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- __ Mov(x10, debug_is_active);
- __ Ldrb(x0, MemOperand(x10));
- __ SmiTag(x0);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
-void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(LoadDescriptor::ReceiverRegister(),
- FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(LoadDescriptor::ReceiverRegister());
-
- // Load the function from the receiver.
- Handle<String> name = expr->name();
- __ Mov(LoadDescriptor::NameRegister(), Operand(name));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
-}
-
-
-void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- SetExpressionPosition(expr);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRunTime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
-
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(x0);
- }
- }
- }
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ Mov(x10, Smi::FromInt(language_mode()));
- __ Push(x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(x0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode but
- // "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
- DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
- __ Ldr(x12, GlobalObjectMemOperand());
- __ Mov(x11, Operand(var->name()));
- __ Mov(x10, Smi::FromInt(SLOPPY));
- __ Push(x12, x11, x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(x0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Mov(x2, Operand(var->name()));
- __ Push(context_register(), x2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
- context()->Plug(x0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
-      break;
- }
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
- // TODO(jbramley): This could be much more efficient using (for
- // example) the CSEL instruction.
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
-
- __ Bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ B(&done);
-
- __ Bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ B(&done);
-
- __ Bind(&done);
- if (context()->IsStackValue()) {
- __ Push(result_register());
- }
- }
- break;
- }
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- {
- AccumulatorValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ Mov(x3, x0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
- context()->Plug(x0);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
-
- Comment cmnt(masm_, "[ CountOperation");
-
- Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(xzr);
- }
- switch (assign_type) {
- case NAMED_PROPERTY: {
- // Put the object both on the stack and in the register.
- VisitForStackValue(prop->obj());
- __ Peek(LoadDescriptor::ReceiverRegister(), 0);
- EmitNamedPropertyLoad(prop);
- break;
- }
-
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
- const Register scratch = x10;
- __ Peek(scratch, kPointerSize);
- __ Push(scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
- const Register scratch1 = x10;
- const Register scratch2 = x11;
- __ Peek(scratch1, 2 * kPointerSize);
- __ Peek(scratch2, kPointerSize);
- __ Push(scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_PROPERTY: {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
- __ Peek(LoadDescriptor::NameRegister(), 0);
- EmitKeyedPropertyLoad(prop);
- break;
- }
-
- case VARIABLE:
- UNREACHABLE();
- }
- }
-
- // We need a second deoptimization point after loading the value
-  // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(x0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property we
- // store the result under the receiver that is currently on top of the
- // stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(x0);
- break;
- case NAMED_PROPERTY:
- __ Poke(x0, kPointerSize);
- break;
- case NAMED_SUPER_PROPERTY:
- __ Poke(x0, kPointerSize * 2);
- break;
- case KEYED_PROPERTY:
- __ Poke(x0, kPointerSize * 2);
- break;
- case KEYED_SUPER_PROPERTY:
- __ Poke(x0, kPointerSize * 3);
- break;
- }
- }
- }
-
- __ Adds(x0, x0, Smi::FromInt(count_value));
- __ B(vc, &done);
- // Call stub. Undo operation first.
- __ Sub(x0, x0, Smi::FromInt(count_value));
- __ B(&stub_call);
- __ Bind(&slow);
- }
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(x0);
- break;
- case NAMED_PROPERTY:
- __ Poke(x0, kXRegSize);
- break;
- case NAMED_SUPER_PROPERTY:
- __ Poke(x0, 2 * kXRegSize);
- break;
- case KEYED_PROPERTY:
- __ Poke(x0, 2 * kXRegSize);
- break;
- case KEYED_SUPER_PROPERTY:
- __ Poke(x0, 3 * kXRegSize);
- break;
- }
- }
- }
-
- __ Bind(&stub_call);
- __ Mov(x1, x0);
- __ Mov(x0, Smi::FromInt(count_value));
-
- SetExpressionPosition(expr);
-
- {
- Assembler::BlockPoolsScope scope(masm_);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ Bind(&done);
-
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
- // Store the value returned in x0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(x0);
- }
- // For all contexts except EffectContext, we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
- }
- break;
- case NAMED_PROPERTY: {
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::NameRegister());
- __ Pop(StoreDescriptor::ReceiverRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- }
-}
-
-
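The Adds/B(vc) pair above is the Smi fast path for ++/--: add the tagged delta and fall through only if the signed addition overflowed, in which case the stub path takes over. A rough stand-alone illustration of that idea, assuming a 1-bit tag for simplicity (a sketch, not V8's actual representation):

#include <cstdint>
#include <cstdio>
#include <optional>

// Small integers ("Smis") are stored as value * 2, leaving tag bit 0 clear.
using Tagged = int32_t;
inline Tagged SmiFromInt(int32_t value) { return value * 2; }
inline int32_t SmiToInt(Tagged smi) { return smi / 2; }

// Adds a tagged delta and reports overflow, mirroring Adds + B(vc, &done):
// on overflow the caller undoes the addition and calls the binary-op stub.
std::optional<Tagged> SmiAddWithOverflowCheck(Tagged smi, int32_t delta) {
  int64_t wide = int64_t{smi} + int64_t{SmiFromInt(delta)};
  if (wide < INT32_MIN || wide > INT32_MAX) return std::nullopt;
  return static_cast<Tagged>(wide);
}

int main() {
  Tagged x = SmiFromInt(41);
  if (auto incremented = SmiAddWithOverflowCheck(x, +1)) {
    std::printf("%d\n", SmiToInt(*incremented));  // prints 42
  }
  return 0;
}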
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
- Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- Factory* factory = isolate()->factory();
- if (String::Equals(check, factory->number_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
- __ JumpIfSmi(x0, if_true);
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->string_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
- __ JumpIfSmi(x0, if_false);
- // Check for undetectable objects => false.
- __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
- __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
- __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
- } else if (String::Equals(check, factory->symbol_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->boolean_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
- __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
- __ CompareRoot(x0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->undefined_string())) {
- ASM_LOCATION(
- "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
- __ JumpIfSmi(x0, if_false);
- // Check for undetectable objects => true.
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
- __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
- fall_through);
- } else if (String::Equals(check, factory->function_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
- __ JumpIfSmi(x0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
- __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
- fall_through);
-
- } else if (String::Equals(check, factory->object_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
- __ JumpIfSmi(x0, if_false);
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- // Check for JS objects => true.
- Register map = x10;
- __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- if_false, lt);
- __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(gt, if_false);
- // Check for undetectable objects => false.
- __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
-
- __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
-
- } else {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
- if (if_false != fall_through) __ B(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
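EmitLiteralCompareTypeof answers comparisons of the form typeof v == "literal" straight from the value's tag and map bits, without materializing the typeof string. A host-level analogue of that dispatch (illustrative only; the Kind enum stands in for the Smi check and instance-type tests, and "undetectable" for the Map::kIsUndetectable bit):

#include <string_view>

enum class Kind {
  kSmi, kHeapNumber, kString, kSymbol, kBoolean,
  kUndefined, kNull, kFunction, kObject
};

struct Value {
  Kind kind;
  bool undetectable = false;  // Map::kIsUndetectable in the real code
};

// Mirrors the branch structure above: each literal maps to a tag/map test.
bool TypeofEquals(const Value& v, std::string_view literal) {
  if (literal == "number")    return v.kind == Kind::kSmi || v.kind == Kind::kHeapNumber;
  if (literal == "string")    return v.kind == Kind::kString && !v.undetectable;
  if (literal == "symbol")    return v.kind == Kind::kSymbol;
  if (literal == "boolean")   return v.kind == Kind::kBoolean;
  if (literal == "undefined") return v.kind == Kind::kUndefined ||
                                     (v.kind != Kind::kSmi && v.undetectable);
  if (literal == "function")  return v.kind == Kind::kFunction;
  if (literal == "object")    return v.kind == Kind::kNull ||
                                     (v.kind == Kind::kObject && !v.undetectable);
  return false;  // any other literal is always false, like the trailing else
}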
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
-
- // Try to generate an optimized comparison with a literal value.
- // TODO(jbramley): This only checks common values like NaN or undefined.
- // Should it also handle ARM64 immediate operands?
- if (TryLiteralCompare(expr)) {
- return;
- }
-
- // Assign labels according to context()->PrepareTest.
- Label materialize_true;
- Label materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
-
- // Pop the stack value.
- __ Pop(x1);
-
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(op)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- Split(cond, if_true, if_false, NULL);
- __ Bind(&slow_case);
- }
-
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (expr->op() == Token::EQ_STRICT) {
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(x0, nil_value);
- Split(eq, if_true, if_false, fall_through);
- } else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
- }
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- // TODO(jbramley): Tidy this up once the merge is done, using named registers
- // and suchlike. The implementation changes a little in bleeding_edge, so I
- // don't want to spend too much time on it now.
-
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ Push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ B(&suspend);
-
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&continuation);
- __ B(&resume);
-
- __ Bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
- __ Mov(x1, Smi::FromInt(continuation.pos()));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
- __ Cmp(__ StackPointer(), x1);
- __ B(eq, &post_runtime);
- __ Push(x0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Bind(&post_runtime);
- __ Pop(result_register());
- EmitReturnSequence();
-
- __ Bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- __ Str(x1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
- // Pop value from top-of-stack slot, box result into result register.
- EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
- break;
- }
-
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ B(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ Bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(load_name, x3, x0); // "throw", iter, except
- __ B(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ Bind(&l_try);
- __ Pop(x0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ Push(x0); // result
- __ B(&l_suspend);
-
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&l_continuation);
- __ B(&l_resume);
-
- __ Bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ Peek(x0, generator_object_depth);
- __ Push(x0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
- __ Mov(x1, Smi::FromInt(l_continuation.pos()));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Pop(x0); // result
- EmitReturnSequence();
- __ Bind(&l_resume); // received in x0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ Bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(load_name, x3, x0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ Bind(&l_call);
- __ Peek(load_receiver, 1 * kPointerSize);
- __ Peek(load_name, 2 * kPointerSize);
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ Mov(x1, x0);
- __ Poke(x1, 2 * kPointerSize);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
-
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Bind(&l_loop);
- __ Move(load_receiver, x0);
-
- __ Push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->DoneFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
- // The ToBooleanStub argument (result.done) is in x0.
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ Cbz(x0, &l_try);
-
- // result.value
- __ Pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->ValueFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
- context()->DropAndPlug(2, x0); // drop iter and g
- break;
- }
- }
-}
-
-
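The Yield::kDelegating case above is the full-codegen expansion of yield*: keep calling the inner iterator's "next" with the last received value, re-yield every non-done result, and route exceptions to "throw" via the surrounding try handler. A simplified host-level model of the l_next / l_call / l_loop cycle (names are illustrative, not V8 API, and the throw path is omitted):

#include <cstdio>
#include <functional>

struct IterResult {
  int value;
  bool done;
};

struct Iterator {
  std::function<IterResult(int)> next;  // plays the role of receiver[f](arg)
};

// Pumps the inner iterator until it reports done, yielding every intermediate
// value to the outer caller; the caller's reply becomes the next value sent in.
int DelegateYield(Iterator iter, const std::function<int(int)>& yield_to_caller) {
  int received = 0;  // initial send value ("undefined" in JS terms)
  for (;;) {
    IterResult result = iter.next(received);
    if (result.done) return result.value;      // final value of the yield*
    received = yield_to_caller(result.value);  // suspend point in the real code
  }
}

int main() {
  int i = 0;
  Iterator counter{[&](int) { ++i; return IterResult{i, i == 3}; }};
  int last = DelegateYield(counter, [](int v) { std::printf("yield %d\n", v); return 0; });
  std::printf("done with %d\n", last);  // yield 1, yield 2, done with 3
  return 0;
}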
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
- Register generator_object = x1;
- Register the_hole = x2;
- Register operand_stack_size = w3;
- Register function = x4;
-
- // The value stays in x0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed. x1
- // will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- __ Pop(generator_object);
-
- // Load suspended function and context.
- __ Ldr(cp, FieldMemOperand(generator_object,
- JSGeneratorObject::kContextOffset));
- __ Ldr(function, FieldMemOperand(generator_object,
- JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kReceiverOffset));
- __ Push(x10);
-
- // Push holes for the rest of the arguments to the generator function.
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-
- // The number of arguments is stored as an int32_t, and -1 is a marker
- // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
- // extension to correctly handle it. However, in this case, we operate on
- // 32-bit W registers, so extension isn't required.
- __ Ldr(w10, FieldMemOperand(x10,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ PushMultipleTimes(the_hole, w10);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ Bl(&resume_frame);
- __ B(&done);
-
- __ Bind(&resume_frame);
- __ Push(lr, // Return address.
- fp, // Caller's frame pointer.
- cp, // Callee's context.
- function); // Callee's JS Function.
- __ Add(fp, __ StackPointer(), kPointerSize * 2);
-
- // Load and untag the operand stack size.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kOperandStackOffset));
- __ Ldr(operand_stack_size,
- UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ Cbnz(operand_stack_size, &slow_resume);
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- __ Ldrsw(x11,
- UntagSmiFieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Add(x10, x10, x11);
- __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ Str(x12, FieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Br(x10);
-
- __ Bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- __ PushMultipleTimes(the_hole, operand_stack_size);
-
- __ Mov(x10, Smi::FromInt(resume_mode));
- __ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
- // Not reached: the runtime call returns elsewhere.
- __ Unreachable();
-
- __ Bind(&done);
- context()->Plug(result_register());
-}
-
-
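The sign-extension comment in EmitGeneratorResume is worth unpacking: the formal parameter count is a 32-bit field in which -1 (the don't-adapt-arguments sentinel) is a marker, so a zero-extending 32-to-64-bit load would corrupt the sentinel; staying in W registers sidesteps the issue. A minimal demonstration of the difference (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t stored = -1;  // the "don't adapt arguments" sentinel

  // What a zero-extending load (Ldr to a W register, then used as 64-bit) gives:
  uint64_t zero_extended = static_cast<uint32_t>(stored);
  // What a sign-extending load (Ldrsw) gives:
  int64_t sign_extended = stored;

  std::printf("zero-extended: %llu\n", static_cast<unsigned long long>(zero_extended));
  std::printf("sign-extended: %lld\n", static_cast<long long>(sign_extended));
  // Prints 4294967295 vs -1: only the latter still compares equal to the
  // sentinel, which is why 64-bit consumers of this field must sign extend.
  return 0;
}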
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
-
- // Allocate and populate an object with this form: { value: VAL, done: DONE }
-
- Register result = x0;
- __ Allocate(instance_size, result, x10, x11, &gc_required, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- __ Bind(&allocated);
- Register map_reg = x1;
- Register result_value = x2;
- Register boolean_done = x3;
- Register empty_fixed_array = x4;
- Register untagged_result = x5;
- __ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
- __ Ldr(map_reg,
- ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
- __ Pop(result_value);
- __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
- __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
- STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
- JSObject::kElementsOffset);
- STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
- JSGeneratorObject::kResultDonePropertyOffset);
- __ ObjectUntag(untagged_result, result);
- __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
- __ Stp(empty_fixed_array, empty_fixed_array,
- MemOperand(untagged_result, JSObject::kPropertiesOffset));
- __ Stp(result_value, boolean_done,
- MemOperand(untagged_result,
- JSGeneratorObject::kResultValuePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
- x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-}
-
-
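EmitCreateIteratorResult hand-allocates a fixed-shape object so the generator can return { value: VAL, done: DONE } without a runtime call on the fast path. Roughly the layout it fills in, shown as a host-level struct (field names are descriptive; the real code works with symbolic offsets, not a C++ type):

#include <cstdio>

struct IteratorResultObject {
  const void* map;         // iterator_result_map from the native context
  const void* properties;  // empty_fixed_array
  const void* elements;    // empty_fixed_array
  int value;               // kResultValuePropertyOffset (needs a write barrier)
  bool done;               // kResultDonePropertyOffset  (boolean from the root set)
};

IteratorResultObject MakeIteratorResult(int value, bool done) {
  // The assembly stores map/properties/elements/value/done as five consecutive
  // pointer-sized fields; this mirrors only the shape, not the tagging.
  return IteratorResultObject{nullptr, nullptr, nullptr, value, done};
}

int main() {
  IteratorResultObject r = MakeIteratorResult(42, false);
  std::printf("{ value: %d, done: %s }\n", r.value, r.done ? "true" : "false");
  return 0;
}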
-// TODO(all): I don't like this method.
-// It seems to me that in too many places x0 is used in place of this.
-// Also, this function is not suitable for all places where x0 should be
- // abstracted (e.g. when used as an argument). But some places assume that the
-// first argument register is x0, and use this function instead.
-// Considering that most of the register allocation is hard-coded in the
-// FullCodeGen, that it is unlikely we will need to change it extensively, and
-// that abstracting the allocation through functions would not yield any
-// performance benefit, I think the existence of this function is debatable.
-Register FullCodeGenerator::result_register() {
- return x0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
- __ Str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ Ldr(dst, ContextMemOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- DCHECK(kSmiTag == 0);
- __ Push(xzr);
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
- __ Push(x10);
- } else {
- DCHECK(declaration_scope->is_function_scope());
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(x10);
- }
-}
-
-
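PushFunctionArgumentForContextAllocation chooses what to pass as the closure when a new context is allocated, based on the kind of declaration scope. The decision table, sketched as ordinary C++ (the enum names are illustrative, not V8 types):

enum class ScopeKind { kScript, kModule, kEval, kFunction };

enum class ClosureArgument {
  kSmiSentinel,            // runtime substitutes the canonical empty function
  kCallingContextClosure,  // reuse the closure of the context performing the eval
  kCurrentFrameFunction    // the JS function stored in the current frame
};

ClosureArgument ClosureForNewContext(ScopeKind kind) {
  switch (kind) {
    case ScopeKind::kScript:
    case ScopeKind::kModule:
      return ClosureArgument::kSmiSentinel;            // __ Push(xzr)
    case ScopeKind::kEval:
      return ClosureArgument::kCallingContextClosure;  // Context::CLOSURE_INDEX
    case ScopeKind::kFunction:
      return ClosureArgument::kCurrentFrameFunction;   // kFunctionOffset
  }
  return ClosureArgument::kCurrentFrameFunction;  // unreachable
}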
-void FullCodeGenerator::EnterFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
- DCHECK(!result_register().is(x10));
- // Preserve the result register while executing finally block.
- // Also cook the return address in lr to the stack (smi encoded Code* delta).
- __ Sub(x10, lr, Operand(masm_->CodeObject()));
- __ SmiTag(x10);
- __ Push(result_register(), x10);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x10, pending_message_obj);
- __ Ldr(x10, MemOperand(x10));
- __ Push(x10);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
- DCHECK(!result_register().is(x10));
-
- // Restore pending message from stack.
- __ Pop(x10);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x13, pending_message_obj);
- __ Str(x10, MemOperand(x13));
-
- // Restore result register and cooked return address from the stack.
- __ Pop(x10, result_register());
-
- // Uncook the return address (see EnterFinallyBlock).
- __ SmiUntag(x10);
- __ Add(x11, x10, Operand(masm_->CodeObject()));
- __ Br(x11);
-}
-
-
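EnterFinallyBlock "cooks" the return address: instead of saving lr directly (which would dangle if the Code object moved during GC), it saves the smi-tagged offset from the start of the code object, and ExitFinallyBlock rebuilds the address from the possibly relocated code start. A small sketch of that arithmetic, assuming a 1-bit smi tag for illustration:

#include <cassert>
#include <cstdint>

constexpr unsigned kSmiTagShift = 1;  // illustrative tag width

// EnterFinallyBlock: Sub(x10, lr, CodeObject()) followed by SmiTag(x10).
uint64_t CookReturnAddress(uint64_t return_address, uint64_t code_start) {
  return (return_address - code_start) << kSmiTagShift;
}

// ExitFinallyBlock: SmiUntag(x10) followed by Add(x11, x10, CodeObject()).
uint64_t UncookReturnAddress(uint64_t cooked, uint64_t code_start) {
  return (cooked >> kSmiTagShift) + code_start;
}

int main() {
  uint64_t code_start = 0x10000, lr = 0x10234;
  uint64_t cooked = CookReturnAddress(lr, code_start);

  // Suppose the code object was relocated while the finally block ran:
  uint64_t relocated_code_start = 0x80000;
  assert(UncookReturnAddress(cooked, relocated_code_start) ==
         relocated_code_start + (lr - code_start));
  return 0;
}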
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(x10));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Mov(x13, pending_message_obj);
- __ Str(x10, MemOperand(x13));
-}
-
-
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
- __ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
-}
-
-
-#undef __
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
- Address pc,
- BackEdgeState target_state,
- Code* replacement_code) {
- // Turn the jump into a nop.
- Address branch_address = pc - 3 * kInstructionSize;
- PatchingAssembler patcher(branch_address, 1);
-
- DCHECK(Instruction::Cast(branch_address)
- ->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
- (Instruction::Cast(branch_address)->IsCondBranchImm() &&
- Instruction::Cast(branch_address)->ImmPCOffset() ==
- 6 * kInstructionSize));
-
- switch (target_state) {
- case INTERRUPT:
- // <decrement profiling counter>
- // .. .. .. .. b.pl ok
- // .. .. .. .. ldr x16, pc+<interrupt stub address>
- // .. .. .. .. blr x16
- // ... more instructions.
- // ok-label
- // Jump offset is 6 instructions.
- patcher.b(6, pl);
- break;
- case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
- // <decrement profiling counter>
- // .. .. .. .. mov x0, x0 (NOP)
- // .. .. .. .. ldr x16, pc+<on-stack replacement address>
- // .. .. .. .. blr x16
- patcher.nop(Assembler::INTERRUPT_CODE_NOP);
- break;
- }
-
- // Replace the call address.
- Instruction* load = Instruction::Cast(pc)->preceding(2);
- Address interrupt_address_pointer =
- reinterpret_cast<Address>(load) + load->ImmPCOffset();
- DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())) ||
- (Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->InterruptCheck()
- ->entry())) ||
- (Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OsrAfterStackCheck()
- ->entry())) ||
- (Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())));
- Memory::uint64_at(interrupt_address_pointer) =
- reinterpret_cast<uint64_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
-}
-
-
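BackEdgeTable::PatchAt flips each loop back edge between two shapes: a conditional "b.pl ok" that skips the interrupt call on the common path, or a marker NOP that makes the call unconditional for on-stack replacement; the ldr x16 literal slot two instructions earlier is repointed at the matching builtin. A table-style sketch of that mapping (plain C++, no real instruction patching):

enum class BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

struct PatchedBackEdge {
  bool branch_is_conditional;  // true: "b.pl ok"; false: marker NOP
  const char* call_target;     // what the ldr x16 literal slot points at
};

PatchedBackEdge PatchFor(BackEdgeState target_state) {
  switch (target_state) {
    case BackEdgeState::INTERRUPT:
      return {true, "InterruptCheck"};
    case BackEdgeState::ON_STACK_REPLACEMENT:
      return {false, "OnStackReplacement"};
    case BackEdgeState::OSR_AFTER_STACK_CHECK:
      return {false, "OsrAfterStackCheck"};
  }
  return {true, "InterruptCheck"};  // unreachable, keeps compilers quiet
}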
-BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc) {
- // TODO(jbramley): There should be some extra assertions here (as in the ARM
- // back-end), but this function is gone in bleeding_edge so it might not
- // matter anyway.
- Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
-
- if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
- Instruction* load = Instruction::Cast(pc)->preceding(2);
- uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
- load->ImmPCOffset());
- if (entry == reinterpret_cast<uint64_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
- return ON_STACK_REPLACEMENT;
- } else if (entry == reinterpret_cast<uint64_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry())) {
- return OSR_AFTER_STACK_CHECK;
- } else {
- UNREACHABLE();
- }
- }
-
- return INTERRUPT;
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 789268430d..60243d8306 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 9bd02f45ab..7a8e2f4ee1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -364,12 +364,6 @@ void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
}
-void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 62e6f2a79e..b49b457124 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return x2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
const Register InstanceofDescriptor::left() {
@@ -68,6 +70,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
@@ -92,6 +102,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
@@ -181,10 +195,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // x3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // x4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {x0, x1, x2};
+ Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -389,11 +404,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x1, // math rounding function
+ x3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
+ x4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index fef51c669b..4b8208180e 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@@ -296,6 +294,11 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -315,6 +318,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@@ -887,8 +896,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
@@ -1203,7 +1211,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
LOperand* value = UseRegisterAtStart(val);
LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (val->CheckFlag(HInstruction::kUint32)) {
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
@@ -1703,13 +1711,22 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
if (instr->representation().IsDouble()) {
LOperand* temp = (!instr->key()->IsConstant() ||
instr->RequiresHoleCheck())
@@ -1743,8 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LInstruction* result = DefineAsRegister(
new(zone()) LLoadKeyedExternal(elements, key, temp));
- if ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ if (elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32)) {
result = AssignEnvironment(result);
}
@@ -2348,7 +2364,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* elements = NULL;
LOperand* val = NULL;
- if (!instr->is_typed_elements() &&
+ if (!instr->is_fixed_typed_array() &&
instr->value()->representation().IsTagged() &&
instr->NeedsWriteBarrier()) {
// RecordWrite() will clobber all registers.
@@ -2361,15 +2377,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
temp = instr->key()->IsConstant() ? NULL : TempRegister();
}
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DCHECK((instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
} else if (instr->value()->representation().IsDouble()) {
@@ -2457,6 +2470,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 4507c07591..70337778f4 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -104,6 +104,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
@@ -152,6 +153,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -1673,6 +1675,22 @@ class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
};
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
+};
+
+
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1748,7 +1766,7 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
};
@@ -2455,6 +2473,28 @@ class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
template<int T>
class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
public:
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 074926b83b..3dff64cbe8 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
+#include "src/arm64/frames-arm64.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
@@ -276,15 +275,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1476,9 +1483,14 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoAddE(LAddE* instr) {
Register result = ToRegister(instr->result());
Register left = ToRegister(instr->left());
- Operand right = (instr->right()->IsConstantOperand())
- ? ToInteger32(LConstantOperand::cast(instr->right()))
- : Operand(ToRegister32(instr->right()), SXTW);
+ Operand right = Operand(x0); // Dummy initialization.
+ if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
+ right = Operand(ToRegister(instr->right()));
+ } else if (instr->right()->IsConstantOperand()) {
+ right = ToInteger32(LConstantOperand::cast(instr->right()));
+ } else {
+ right = Operand(ToRegister32(instr->right()), SXTW);
+ }
DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
__ Add(result, left, right);
@@ -1926,6 +1938,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ B(eq, true_label);
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
+ __ B(eq, true_label);
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -3362,13 +3380,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(x0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@@ -3426,42 +3462,33 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
elements_kind,
instr->base_offset());
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result.S(), mem_op);
__ Fcvt(result, result.S());
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, mem_op);
} else {
Register result = ToRegister(instr->result());
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ Ldrsb(result, mem_op);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ Ldrb(result, mem_op);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ Ldrsh(result, mem_op);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Ldrh(result, mem_op);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ Ldrsw(result, mem_op);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Ldr(result.W(), mem_op);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3472,8 +3499,6 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3692,7 +3717,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -5017,8 +5042,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
// here.
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
- __ Push(cp, scratch1, scratch2); // The context is the first argument.
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ __ Push(scratch1, scratch2);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -5148,44 +5173,33 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
elements_kind,
instr->base_offset());
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
DoubleRegister dbl_scratch = double_scratch();
__ Fcvt(dbl_scratch.S(), value);
__ Str(dbl_scratch.S(), dst);
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
__ Str(value, dst);
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ Strb(value, dst);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Strh(value, dst);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Str(value.W(), dst);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -5507,6 +5521,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
@@ -5907,10 +5945,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register scratch = ToRegister(instr->temp2());
__ JumpIfSmi(value, false_label);
- __ JumpIfObjectType(
- value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+ __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
+ EmitBranch(instr, lt);
} else if (String::Equals(type_name, factory->symbol_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
@@ -5962,6 +5998,20 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); \
+ Register map = ToRegister(instr->temp1()); \
+ \
+ __ JumpIfSmi(value, false_label); \
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); \
+ __ CompareRoot(map, Heap::k##Type##MapRootIndex); \
+ EmitBranch(instr, eq);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ B(false_label);
}
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
index 7d01f792bc..1520fa1888 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm64/delayed-masm-arm64-inl.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
index 2eb651b924..8866db4c94 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#include "src/v8.h"
-
#include "src/arm64/delayed-masm-arm64.h"
#include "src/lithium.h"
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index b691e21813..445513bf5a 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -869,15 +869,6 @@ void MacroAssembler::Isb() {
}
-void MacroAssembler::Ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- DCHECK(allow_macro_instructions_);
- DCHECK(!AreAliased(rt, rt2));
- ldnp(rt, rt2, src);
-}
-
-
void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
DCHECK(allow_macro_instructions_);
ldr(rt, imm);
@@ -1134,14 +1125,6 @@ void MacroAssembler::Umull(const Register& rd, const Register& rn,
}
-void MacroAssembler::Stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- DCHECK(allow_macro_instructions_);
- stnp(rt, rt2, dst);
-}
-
-
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index c7d6797416..586df33c4d 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -907,6 +906,25 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
}
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3,
+ const CPURegister& dst4, const CPURegister& dst5,
+ const CPURegister& dst6, const CPURegister& dst7) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
+ DCHECK(dst0.IsValid());
+
+ int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PopHelper(4, size, dst0, dst1, dst2, dst3);
+ PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
+ PopPostamble(count, size);
+}
+
+
void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
@@ -3030,10 +3048,10 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
- Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
+ Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -3223,26 +3241,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- Bic(object, object, kHeapObjectTagMask);
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- Mov(scratch, new_space_allocation_top);
- Ldr(scratch, MemOperand(scratch));
- Cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- Mov(scratch, new_space_allocation_top);
- Str(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -4417,21 +4415,29 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!AreAliased(object, scratch0, scratch1));
- Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// Scratch contains elements pointer.
Mov(current, object);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
// Loop based on the map going up the prototype chain.
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
+ B(lo, found);
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+ CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
+
+ Bind(&end);
}
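For orientation, the rewritten prototype walk above can be read as the pseudocode below: the loop now terminates on a null prototype via a root-index comparison instead of materializing the null value through the factory, and it branches to found for anything with an instance type below JS_OBJECT_TYPE (proxies and values) before even checking the elements kind. This is an explanatory sketch, not code from the patch:

    // current = object->map()->prototype();
    // if (current == null) goto end;
    // loop:
    //   map = current->map();
    //   if (map->instance_type() < JS_OBJECT_TYPE) goto found;
    //   if (map->elements_kind() == DICTIONARY_ELEMENTS) goto found;
    //   current = map->prototype();
    //   if (current != null) goto loop;
    // end: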
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 7854ff0e52..76e2fdb3fb 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -7,11 +7,10 @@
#include <vector>
+#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
-#include "src/globals.h"
-
-#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
+#include "src/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -34,6 +33,20 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
+#define kReturnRegister0 x0
+#define kReturnRegister1 x1
+#define kJSFunctionRegister x1
+#define kContextRegister cp
+#define kInterpreterAccumulatorRegister x0
+#define kInterpreterRegisterFileRegister x18
+#define kInterpreterBytecodeOffsetRegister x19
+#define kInterpreterBytecodeArrayRegister x20
+#define kInterpreterDispatchTableRegister x21
+#define kRuntimeCallFunctionRegister x1
+#define kRuntimeCallArgCountRegister x0
+
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
V(Strb, Register&, rt, STRB_w) \
@@ -569,6 +582,10 @@ class MacroAssembler : public Assembler {
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3,
+ const CPURegister& dst4, const CPURegister& dst5 = NoReg,
+ const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
void Push(const Register& src0, const FPRegister& src1);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
@@ -1305,12 +1322,6 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1771,7 +1782,7 @@ class MacroAssembler : public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@@ -2235,7 +2246,7 @@ class UseScratchRegisterScope {
};
-inline MemOperand ContextMemOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
deleted file mode 100644
index 801cc1359b..0000000000
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ /dev/null
@@ -1,1617 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
-#include "src/unicode.h"
-
-#include "src/arm64/regexp-macro-assembler-arm64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention:
- * - w19 : Used to temporarily store a value before a call to C code.
- * See CheckNotBackReferenceIgnoreCase.
- * - x20 : Pointer to the current code object (Code*),
- * it includes the heap object tag.
- * - w21 : Current position in input, as a negative offset from
- * the end of the string. Note that this is the byte
- * offset, not the character offset!
- * - w22 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - x23 : Points to tip of backtrack stack.
- * - w24 : Position of the first character minus one: non_position_value.
- * Used to initialize capture registers.
- * - x25 : Address at the end of the input string: input_end.
- * Points to byte after last character in input.
- * - x26 : Address at the start of the input string: input_start.
- * - w27 : Where to start in the input string.
- * - x28 : Output array pointer.
- * - x29/fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - x16/x17 : IP registers, used by assembler. Very volatile.
- * - csp : Points to tip of C stack.
- *
- * - x0-x7 : Used as a cache to store 32 bit capture registers. These
- * registers need to be retained every time a call to C code
- * is done.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- *
- * Location Name Description
- * (as referred to in
- * the code)
- *
- * - fp[104] isolate Address of the current isolate.
- * - fp[96] return_address Secondary link/return address
- * used by an exit frame if this is a
- * native call.
- * ^^^ csp when called ^^^
- * - fp[88] lr Return from the RegExp code.
- * - fp[80] r29 Old frame pointer (CalleeSaved).
- * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
- * - fp[-8] direct_call 1 => Direct call from JavaScript code.
- * 0 => Call through the runtime system.
- * - fp[-16] stack_base High end of the memory area to use as
- * the backtracking stack.
- * - fp[-24] output_size Output may fit multiple sets of matches.
- * - fp[-32] input Handle containing the input string.
- * - fp[-40] success_counter
- * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
- * - fp[-44] register N Capture registers initialized with
- * - fp[-48] register N + 1 non_position_value.
- * ... The first kNumCachedRegisters (N) registers
- * ... are cached in x0 to x7.
- * ... Only positions must be stored in the first
- * - ... num_saved_registers_ registers.
- * - ...
- * - register N + num_registers - 1
- * ^^^^^^^^^ csp ^^^^^^^^^
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, and the remaining arguments are passed in registers, e.g. by calling
- * the code entry cast to a function with the signature:
- * int (*match)(String* input,
- * int start_offset,
- * Address input_start,
- * Address input_end,
- * int* output,
- * int output_size,
- * Address stack_base,
- * bool direct_call = false,
- * Address secondary_return_address, // Only used by native call.
- * Isolate* isolate)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm64/simulator-arm64.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
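As a reading aid for the signature documented above, a direct invocation of the generated code would conceptually look like the sketch below. This is illustrative only; the real call goes through NativeRegExpMacroAssembler::Execute() and the CALL_GENERATED_REGEXP_CODE macro as the comment states, and the entry() accessor and the argument variables used here are assumptions, not part of this file:

    typedef int (*RegExpCodeEntry)(String* input, int start_offset,
                                   Address input_start, Address input_end,
                                   int* output, int output_size,
                                   Address stack_base, bool direct_call,
                                   Address secondary_return_address,
                                   Isolate* isolate);
    // Hypothetical direct call; Execute() normally performs this via the macro.
    RegExpCodeEntry entry = reinterpret_cast<RegExpCodeEntry>(code->entry());
    int result = entry(subject, previous_index, start_address, end_address,
                       offsets_vector, offsets_vector_length, stack_base,
                       /* direct_call */ false, /* secondary_return */ NULL,
                       isolate);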
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
- Zone* zone, Mode mode,
- int registers_to_save)
- : NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- __ SetStackPointer(csp);
- DCHECK_EQ(0, registers_to_save % 2);
- // We can cache at most 16 W registers in x0-x7.
- STATIC_ASSERT(kNumCachedRegisters <= 16);
- STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
- __ B(&entry_label_); // We'll write the entry code later.
- __ Bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-int RegExpMacroAssemblerARM64::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ Add(current_input_offset(),
- current_input_offset(), by * char_size());
- }
-}
-
-
-void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
- DCHECK((reg >= 0) && (reg < num_registers_));
- if (by != 0) {
- Register to_advance;
- RegisterState register_state = GetRegisterState(reg);
- switch (register_state) {
- case STACKED:
- __ Ldr(w10, register_location(reg));
- __ Add(w10, w10, by);
- __ Str(w10, register_location(reg));
- break;
- case CACHED_LSW:
- to_advance = GetCachedRegister(reg);
- __ Add(to_advance, to_advance, by);
- break;
- case CACHED_MSW:
- to_advance = GetCachedRegister(reg);
- __ Add(to_advance, to_advance,
- static_cast<int64_t>(by) << kWRegSizeInBits);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void RegExpMacroAssemblerARM64::Backtrack() {
- CheckPreemption();
- Pop(w10);
- __ Add(x10, code_pointer(), Operand(w10, UXTW));
- __ Br(x10);
-}
-
-
-void RegExpMacroAssemblerARM64::Bind(Label* label) {
- __ Bind(label);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
- CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
- CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
- BranchOrBacktrack(eq, on_at_start);
- __ Bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
- CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- // This method is only ever called from the cctests.
-
- if (check_end_of_string) {
- // Check that the last character of the required match is inside the string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- Register characters_address = x11;
-
- __ Add(characters_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
- if (cp_offset != 0) {
- __ Add(characters_address, characters_address, cp_offset * char_size());
- }
-
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == LATIN1) {
- __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
- DCHECK(str[i] <= String::kMaxOneByteCharCode);
- } else {
- __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
- }
- CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
- __ Ldr(w10, MemOperand(backtrack_stackpointer()));
- __ Cmp(current_input_offset(), w10);
- __ Cset(x11, eq);
- __ Add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeLog2));
- BranchOrBacktrack(eq, on_equal);
-}
-
-void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- Register capture_start_offset = w10;
- // Save the capture length in a callee-saved register so it will
- // be preserved if we call a C helper.
- Register capture_length = w19;
- DCHECK(kCalleeSaved.IncludesAliasOf(capture_length));
-
- // Find length of back-referenced capture.
- DCHECK((start_reg % 2) == 0);
- if (start_reg < kNumCachedRegisters) {
- __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
- __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
- } else {
- __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
- }
- __ Sub(capture_length, w11, capture_start_offset); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == LATIN1) {
- Label success;
- Label fail;
- Label loop_check;
-
- Register capture_start_address = x12;
- Register capture_end_addresss = x13;
- Register current_position_address = x14;
-
- __ Add(capture_start_address,
- input_end(),
- Operand(capture_start_offset, SXTW));
- __ Add(capture_end_addresss,
- capture_start_address,
- Operand(capture_length, SXTW));
- __ Add(current_position_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
-
- Label loop;
- __ Bind(&loop);
- __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
- __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
- __ Cmp(w10, w11);
- __ B(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
- __ Orr(w11, w11, 0x20); // Also convert input character.
- __ Cmp(w11, w10);
- __ B(ne, &fail);
- __ Sub(w10, w10, 'a');
- __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
- __ B(ls, &loop_check); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ Sub(w10, w10, 224 - 'a');
- __ Cmp(w10, 254 - 224);
- __ Ccmp(w10, 247 - 224, ZFlag, ls); // Check for 247.
- __ B(eq, &fail); // Weren't Latin-1 letters.
-
- __ Bind(&loop_check);
- __ Cmp(capture_start_address, capture_end_addresss);
- __ B(lt, &loop);
- __ B(&success);
-
- __ Bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ Bind(&success);
- // Compute new value of character position after the matched part.
- __ Sub(current_input_offset().X(), current_position_address, input_end());
- if (masm_->emit_debug_code()) {
- __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
- __ Ccmp(current_input_offset(), 0, NoFlag, eq);
- // The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
- }
- } else {
- DCHECK(mode_ == UC16);
- int argument_count = 4;
-
- // The cached registers need to be retained.
- CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
- __ PushCPURegList(cached_registers);
-
- // Put arguments into arguments registers.
- // Parameters are
- // x0: Address byte_offset1 - Address captured substring's start.
- // x1: Address byte_offset2 - Address of current character position.
- // w2: size_t byte_length - length of capture in bytes(!)
- // x3: Isolate* isolate
-
- // Address of start of capture.
- __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
- // Length of capture.
- __ Mov(w2, capture_length);
- // Address of current input position.
- __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
- // Isolate.
- __ Mov(x3, ExternalReference::isolate_address(isolate()));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Check if function returned non-zero for success or zero for failure.
- // x0 is one of the registers used as a cache so it must be tested before
- // the cache is restored.
- __ Cmp(x0, 0);
- __ PopCPURegList(cached_registers);
- BranchOrBacktrack(eq, on_no_match);
-
- // On success, increment position by length of capture.
- __ Add(current_input_offset(), current_input_offset(), capture_length);
- }
-
- __ Bind(&fallthrough);
-}
-
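The LATIN1 path in CheckNotBackReferenceIgnoreCase above folds case by OR-ing both characters with 0x20 and only accepts the pair when the folded value is actually a letter: either 'a'..'z' or a Latin-1 letter in [0xE0, 0xFE] other than the multiplication sign 0xF7. A plain C++ sketch of the same predicate (illustrative, not part of the patch):

    // True when c1 and c2 are treated as equal by the per-character loop above.
    static bool Latin1CharsEqualIgnoreCase(unsigned c1, unsigned c2) {
      if (c1 == c2) return true;
      unsigned folded = c1 | 0x20;
      if (folded != (c2 | 0x20)) return false;
      if (folded - 'a' <= static_cast<unsigned>('z' - 'a')) return true;  // 'a'..'z'
      return (folded - 0xE0 <= 0xFEu - 0xE0) && (folded != 0xF7);  // Latin-1 letters
    }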
-void RegExpMacroAssemblerARM64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- Register capture_start_address = x12;
- Register capture_end_address = x13;
- Register current_position_address = x14;
- Register capture_length = w15;
-
- // Find length of back-referenced capture.
- DCHECK((start_reg % 2) == 0);
- if (start_reg < kNumCachedRegisters) {
- __ Mov(x10, GetCachedRegister(start_reg));
- __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
- } else {
- __ Ldp(w11, w10, capture_location(start_reg, x10));
- }
- __ Sub(capture_length, w11, w10); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
- __ Add(capture_end_address,
- capture_start_address,
- Operand(capture_length, SXTW));
- __ Add(current_position_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
-
- Label loop;
- __ Bind(&loop);
- if (mode_ == LATIN1) {
- __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
- __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
- } else {
- DCHECK(mode_ == UC16);
- __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
- __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
- }
- __ Cmp(w10, w11);
- BranchOrBacktrack(ne, on_no_match);
- __ Cmp(capture_start_address, capture_end_address);
- __ B(lt, &loop);
-
- // Move current character position to position after match.
- __ Sub(current_input_offset().X(), current_position_address, input_end());
- if (masm_->emit_debug_code()) {
- __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
- __ Ccmp(current_input_offset(), 0, NoFlag, eq);
- // The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
- }
- __ Bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ And(w10, current_character(), mask);
- CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- __ And(w10, current_character(), mask);
- CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
- __ Sub(w10, current_character(), minus);
- __ And(w10, w10, mask);
- CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ Sub(w10, current_character(), from);
- // Unsigned lower-or-same condition.
- CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ Sub(w10, current_character(), from);
- // Unsigned higher condition.
- CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ Mov(x11, Operand(table));
- if ((mode_ != LATIN1) || (kTableMask != String::kMaxOneByteCharCode)) {
- __ And(w10, current_character(), kTableMask);
- __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
- } else {
- __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
- }
- __ Ldrb(w11, MemOperand(x11, w10, UXTW));
- CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == LATIN1) {
- // One byte space characters are '\t'..'\r', ' ' and \u00a0.
- Label success;
- // Check for ' ' or 0x00a0.
- __ Cmp(current_character(), ' ');
- __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
- __ B(eq, &success);
- // Check range 0x09..0x0d.
- __ Sub(w10, current_character(), '\t');
- CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
- __ Bind(&success);
- return true;
- }
- return false;
- case 'S':
- // The emitted code for generic character classes is good enough.
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9').
- __ Sub(w10, current_character(), '0');
- CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
- return true;
- case 'D':
- // Match ASCII non-digits.
- __ Sub(w10, current_character(), '0');
- CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- // Here we emit the conditional branch only once at the end to make branch
- // prediction more efficient, even though we could branch out of here
- // as soon as a character matches.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
- if (mode_ == UC16) {
- __ Sub(w10, current_character(), 0x2028);
- // If the Z flag was set we clear the flags to force a branch.
- __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
- // ls -> !((C==1) && (Z==0))
- BranchOrBacktrack(ls, on_no_match);
- } else {
- BranchOrBacktrack(eq, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- // We have to check all 4 newline characters before emitting
- // the conditional branch.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
- if (mode_ == UC16) {
- __ Sub(w10, current_character(), 0x2028);
- // If the Z flag was set we clear the flags to force a fall-through.
- __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
- // hi -> (C==1) && (Z==0)
- BranchOrBacktrack(hi, on_no_match);
- } else {
- BranchOrBacktrack(ne, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ Mov(x10, map);
- __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
- CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- __ Cmp(current_character(), 'z');
- __ B(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ Mov(x10, map);
- __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
- CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
- __ Bind(&done);
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
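The "unsigned (c - min) <= (max - min)" trick mentioned at the top of CheckSpecialCharacterClass turns a two-sided range test into a single compare, because values below min wrap around to large unsigned numbers. A minimal C++ illustration (not from the patch):

    // Equivalent of the Sub/CompareAndBranchOrBacktrack pairs above, e.g. for '\d':
    static bool IsAsciiDigit(unsigned c) {
      return c - '0' <= static_cast<unsigned>('9' - '0');
    }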
-void RegExpMacroAssemblerARM64::Fail() {
- __ Mov(w0, FAILURE);
- __ B(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
- Label return_w0;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ Bind(&entry_label_);
-
- // Arguments on entry:
- // x0: String* input
- // x1: int start_offset
- // x2: byte* input_start
- // x3: byte* input_end
- // x4: int* output array
- // x5: int output array size
- // x6: Address stack_base
- // x7: int direct_call
-
- // The stack pointer should be csp on entry.
- // csp[8]: address of the current isolate
- // csp[0]: secondary link/return address used by native call
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Push registers on the stack; only push the argument registers that we need.
- CPURegList argument_registers(x0, x5, x6, x7);
-
- CPURegList registers_to_retain = kCalleeSaved;
- DCHECK(kCalleeSaved.Count() == 11);
- registers_to_retain.Combine(lr);
-
- DCHECK(csp.Is(__ StackPointer()));
- __ PushCPURegList(registers_to_retain);
- __ PushCPURegList(argument_registers);
-
- // Set frame pointer in place.
- __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
-
- // Initialize callee-saved registers.
- __ Mov(start_offset(), w1);
- __ Mov(input_start(), x2);
- __ Mov(input_end(), x3);
- __ Mov(output_array(), x4);
-
- // Set the number of registers we will need to allocate, that is:
- // - success_counter (X register)
- // - (num_registers_ - kNumCachedRegisters) (W registers)
- int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
- // Do not allocate registers on the stack if they can all be cached.
- if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
- // Make room for the success_counter.
- num_wreg_to_allocate += 2;
-
- // Make sure the stack alignment will be respected.
- int alignment = masm_->ActivationFrameAlignment();
- DCHECK_EQ(alignment % 16, 0);
- int align_mask = (alignment / kWRegSize) - 1;
- num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
-
- // Check if we have space on the stack.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ Mov(x10, stack_limit);
- __ Ldr(x10, MemOperand(x10));
- __ Subs(x10, csp, x10);
-
- // Handle it if the stack pointer is already below the stack limit.
- __ B(ls, &stack_limit_hit);
-
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
- __ B(hs, &stack_ok);
-
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
-
- __ Bind(&stack_limit_hit);
- CallCheckStackGuardState(x10);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Cbnz(w0, &return_w0);
-
- __ Bind(&stack_ok);
-
- // Allocate space on stack.
- __ Claim(num_wreg_to_allocate, kWRegSize);
-
- // Initialize success_counter with 0.
- __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
-
- // Find negative length (offset of start relative to end).
- __ Sub(x10, input_start(), input_end());
- if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
- __ Neg(x11, x10);
- __ Cmp(x11, (1<<30) - 1);
- __ Check(ls, kInputStringTooLong);
- }
- __ Mov(current_input_offset(), w10);
-
- // The non-position value is used as a clearing value for the
- // capture registers; it corresponds to the position of the first character
- // minus one.
- __ Sub(non_position_value(), current_input_offset(), char_size());
- __ Sub(non_position_value(), non_position_value(),
- Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
- // We can store this value twice in an X register for initializing
- // on-stack registers later.
- __ Orr(twice_non_position_value(),
- non_position_value().X(),
- Operand(non_position_value().X(), LSL, kWRegSizeInBits));
-
- // Initialize code pointer register.
- __ Mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Cbnz(start_offset(), &load_char_start_regexp);
- __ Mov(current_character(), '\n');
- __ B(&start_regexp);
-
- // Global regexp restarts matching here.
- __ Bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&start_regexp);
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) {
- ClearRegisters(0, num_saved_registers_ - 1);
- }
-
- // Initialize backtrack stack pointer.
- __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
-
- // Execute
- __ B(&start_label_);
-
- if (backtrack_label_.is_linked()) {
- __ Bind(&backtrack_label_);
- Backtrack();
- }
-
- if (success_label_.is_linked()) {
- Register first_capture_start = w15;
-
- // Save captures when successful.
- __ Bind(&success_label_);
-
- if (num_saved_registers_ > 0) {
- // V8 expects the output to be an int32_t array.
- Register capture_start = w12;
- Register capture_end = w13;
- Register input_length = w14;
-
- // Copy captures to output.
-
- // Get string length.
- __ Sub(x10, input_end(), input_start());
- if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
- __ Cmp(x10, (1<<30) - 1);
- __ Check(ls, kInputStringTooLong);
- }
- // input_start is already offset by start_offset on entry. We need to
- // include that offset when computing the length of the whole string.
- if (mode_ == UC16) {
- __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
- } else {
- __ Add(input_length, start_offset(), w10);
- }
-
- // Copy the results to the output array from the cached registers first.
- for (int i = 0;
- (i < num_saved_registers_) && (i < kNumCachedRegisters);
- i += 2) {
- __ Mov(capture_start.X(), GetCachedRegister(i));
- __ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
- if ((i == 0) && global_with_zero_length_check()) {
- // Keep capture start for the zero-length check later.
- __ Mov(first_capture_start, capture_start);
- }
- // Offsets need to be relative to the start of the string.
- if (mode_ == UC16) {
- __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- }
-
- // Only carry on if there are more than kNumCachedRegisters capture
- // registers.
- int num_registers_left_on_stack =
- num_saved_registers_ - kNumCachedRegisters;
- if (num_registers_left_on_stack > 0) {
- Register base = x10;
- // There is always an even number of capture registers. A pair of
- // registers holds one match as two offsets.
- DCHECK_EQ(0, num_registers_left_on_stack % 2);
- __ Add(base, frame_pointer(), kFirstCaptureOnStack);
-
- // We can unroll the loop here; we should not unroll for fewer than 2
- // registers.
- STATIC_ASSERT(kNumRegistersToUnroll > 2);
- if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
- for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- if ((i == 0) && global_with_zero_length_check()) {
- // Keep capture start for the zero-length check later.
- __ Mov(first_capture_start, capture_start);
- }
- // Offsets need to be relative to the start of the string.
- if (mode_ == UC16) {
- __ Add(capture_start,
- input_length,
- Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- }
- } else {
- Label loop, start;
- __ Mov(x11, num_registers_left_on_stack);
-
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- if (global_with_zero_length_check()) {
- __ Mov(first_capture_start, capture_start);
- }
- __ B(&start);
-
- __ Bind(&loop);
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- __ Bind(&start);
- if (mode_ == UC16) {
- __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- __ Sub(x11, x11, 2);
- __ Cbnz(x11, &loop);
- }
- }
- }
-
- if (global()) {
- Register success_counter = w0;
- Register output_size = x10;
- // Restart matching if the regular expression is flagged as global.
-
- // Increment success counter.
- __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
- __ Add(success_counter, success_counter, 1);
- __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
-
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
- __ Sub(output_size, output_size, num_saved_registers_);
- // Check whether we have enough room for another set of capture results.
- __ Cmp(output_size, num_saved_registers_);
- __ B(lt, &return_w0);
-
- // The output pointer is already set to the next field in the output
- // array.
- // Update output size on the frame before we restart matching.
- __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- __ Cmp(current_input_offset(), first_capture_start);
- // Not a zero-length match, restart.
- __ B(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ Cbz(current_input_offset(), &return_w0);
- // Advance current position after a zero-length match.
- __ Add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ B(&load_char_start_regexp);
- } else {
- __ Mov(w0, SUCCESS);
- }
- }
-
- if (exit_label_.is_linked()) {
- // Exit and return w0
- __ Bind(&exit_label_);
- if (global()) {
- __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
- }
- }
-
- __ Bind(&return_w0);
-
- // Set stack pointer back to first register to retain
- DCHECK(csp.Is(__ StackPointer()));
- __ Mov(csp, fp);
- __ AssertStackConsistency();
-
- // Restore registers.
- __ PopCPURegList(registers_to_retain);
-
- __ Ret();
-
- Label exit_with_exception;
- // Registers x0 to x7 are used to store the first captures, they need to be
- // retained over calls to C++ code.
- CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
- DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
-
- if (check_preempt_label_.is_linked()) {
- __ Bind(&check_preempt_label_);
- SaveLinkRegister();
- // The cached registers need to be retained.
- __ PushCPURegList(cached_registers);
- CallCheckStackGuardState(x10);
- // Returning from the regexp code restores the stack (csp <- fp)
- // so we don't need to drop the link register from it before exiting.
- __ Cbnz(w0, &return_w0);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
- RestoreLinkRegister();
- __ Ret();
- }
-
- if (stack_overflow_label_.is_linked()) {
- __ Bind(&stack_overflow_label_);
- SaveLinkRegister();
- // The cached registers need to be retained.
- __ PushCPURegList(cached_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
- __ Add(x1, frame_pointer(), kStackBase);
- __ Mov(x0, backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, 3);
- // If it returns NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (csp <- fp)
- // so we don't need to drop the link register from it before exiting.
- __ Cbz(w0, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ Mov(backtrack_stackpointer(), x0);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
- RestoreLinkRegister();
- __ Ret();
- }
-
- if (exit_with_exception.is_linked()) {
- __ Bind(&exit_with_exception);
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerARM64::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
- Label* if_ge) {
- Register to_compare = GetRegister(reg, w10);
- CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
- Label* if_lt) {
- Register to_compare = GetRegister(reg, w10);
- CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
- Register to_compare = GetRegister(reg, w10);
- __ Cmp(to_compare, current_input_offset());
- BranchOrBacktrack(eq, if_eq);
-}
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerARM64::Implementation() {
- return kARM64Implementation;
-}
-
-
-void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- // TODO(pielan): Make sure long strings are caught before this, and not
- // just asserted in debug mode.
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
- // Be sane! (And ensure that an int32_t can be used to index the string)
- DCHECK(cp_offset < (1<<30));
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerARM64::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
- Pop(w10);
- StoreRegister(register_index, w10);
-}
-
-
-void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
- } else {
- __ Adr(x10, label, MacroAssembler::kAdrFar);
- __ Sub(x10, x10, code_pointer());
- if (masm_->emit_debug_code()) {
- __ Cmp(x10, kWRegMask);
- // The code offset has to fit in a W register.
- __ Check(ls, kOffsetOutOfRange);
- }
- }
- Push(w10);
- CheckStackLimit();
-}
-
-
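PushBacktrack and Backtrack above cooperate through the backtrack stack: what gets pushed is a 32-bit offset relative to the code object (either label->pos() adjusted for the Code header, or an Adr-computed address minus code_pointer()), and Backtrack() re-adds code_pointer() before branching. Sketched as pseudocode (illustrative only; Pop32 and branch_to are not real helpers in this file):

    // PushBacktrack:  push(target_address - code_object_pointer)  // fits in a W register
    // Backtrack:      branch_to(code_object_pointer + Pop32())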
-void RegExpMacroAssemblerARM64::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- Register to_push = GetRegister(register_index, w10);
- Push(to_push);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
- Register cached_register;
- RegisterState register_state = GetRegisterState(reg);
- switch (register_state) {
- case STACKED:
- __ Ldr(current_input_offset(), register_location(reg));
- break;
- case CACHED_LSW:
- cached_register = GetCachedRegister(reg);
- __ Mov(current_input_offset(), cached_register.W());
- break;
- case CACHED_MSW:
- cached_register = GetCachedRegister(reg);
- __ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
- Register read_from = GetRegister(reg, w10);
- __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
- __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
-}
-
-
-void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ Cmp(current_input_offset(), -by * char_size());
- __ B(ge, &after_position);
- __ Mov(current_input_offset(), -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
- DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
- Register set_to = wzr;
- if (to != 0) {
- set_to = w10;
- __ Mov(set_to, to);
- }
- StoreRegister(register_index, set_to);
-}
-
-
-bool RegExpMacroAssemblerARM64::Succeed() {
- __ B(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- Register position = current_input_offset();
- if (cp_offset != 0) {
- position = w10;
- __ Add(position, current_input_offset(), cp_offset * char_size());
- }
- StoreRegister(reg, position);
-}
-
-
-void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
- DCHECK(reg_from <= reg_to);
- int num_registers = reg_to - reg_from + 1;
-
- // If the first capture register is cached in a hardware register but not
- // aligned on a 64-bit one, we need to clear the first one specifically.
- if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
- StoreRegister(reg_from, non_position_value());
- num_registers--;
- reg_from++;
- }
-
- // Clear cached registers in pairs as far as possible.
- while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
- DCHECK(GetRegisterState(reg_from) == CACHED_LSW);
- __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
- reg_from += 2;
- num_registers -= 2;
- }
-
- if ((num_registers % 2) == 1) {
- StoreRegister(reg_from, non_position_value());
- num_registers--;
- reg_from++;
- }
-
- if (num_registers > 0) {
- // If there are some remaining registers, they are stored on the stack.
- DCHECK(reg_from >= kNumCachedRegisters);
-
- // Move down the indexes of the registers on stack to get the correct offset
- // in memory.
- reg_from -= kNumCachedRegisters;
- reg_to -= kNumCachedRegisters;
- // We should not unroll the loop for fewer than 2 registers.
- STATIC_ASSERT(kNumRegistersToUnroll > 2);
- // We position the base pointer at (reg_from + 1).
- int base_offset = kFirstRegisterOnStack -
- kWRegSize - (kWRegSize * reg_from);
- if (num_registers > kNumRegistersToUnroll) {
- Register base = x10;
- __ Add(base, frame_pointer(), base_offset);
-
- Label loop;
- __ Mov(x11, num_registers);
- __ Bind(&loop);
- __ Str(twice_non_position_value(),
- MemOperand(base, -kPointerSize, PostIndex));
- __ Sub(x11, x11, 2);
- __ Cbnz(x11, &loop);
- } else {
- for (int i = reg_from; i <= reg_to; i += 2) {
- __ Str(twice_non_position_value(),
- MemOperand(frame_pointer(), base_offset));
- base_offset -= kWRegSize * 2;
- }
- }
- }
-}
-
-
-void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
- __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
- __ Sub(x10, backtrack_stackpointer(), x10);
- if (masm_->emit_debug_code()) {
- __ Cmp(x10, Operand(w10, SXTW));
- // The stack offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
- }
- StoreRegister(reg, w10);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return *reinterpret_cast<T*>(re_frame + frame_offset);
-}
-
-
-template <typename T>
-static T* frame_entry_address(Address re_frame, int frame_offset) {
- return reinterpret_cast<T*>(re_frame + frame_offset);
-}
-
-
-int RegExpMacroAssemblerARM64::CheckStackGuardState(
- Address* return_address, Code* re_code, Address re_frame, int start_index,
- const byte** input_start, const byte** input_end) {
- return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate), start_index,
- frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInput), input_start, input_end);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- CompareAndBranchOrBacktrack(current_input_offset(),
- -cp_offset * char_size(),
- ge,
- on_outside_input);
-}
-
-
-bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
- // TODO(pielan): See whether or not we should disable unaligned accesses.
- return !slow_safe();
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
- // Allocate space on the stack to store the return address. The
- // CheckStackGuardState C++ function will overwrite it if the code
- // has moved. Allocate extra space for 2 arguments passed by pointers.
- // AAPCS64 requires the stack to be 16 byte aligned.
- int alignment = masm_->ActivationFrameAlignment();
- DCHECK_EQ(alignment % 16, 0);
- int align_mask = (alignment / kXRegSize) - 1;
- int xreg_to_claim = (3 + align_mask) & ~align_mask;
-
- DCHECK(csp.Is(__ StackPointer()));
- __ Claim(xreg_to_claim);
-
- // CheckStackGuardState needs the end and start addresses of the input string.
- __ Poke(input_end(), 2 * kPointerSize);
- __ Add(x5, csp, 2 * kPointerSize);
- __ Poke(input_start(), kPointerSize);
- __ Add(x4, csp, kPointerSize);
-
- __ Mov(w3, start_offset());
- // RegExp code frame pointer.
- __ Mov(x2, frame_pointer());
- // Code* of self.
- __ Mov(x1, Operand(masm_->CodeObject()));
-
- // We need to pass a pointer to the return address as the first argument.
- // The DirectCEntry stub will place the return address on the stack before
- // calling, so the stack pointer will point to it.
- __ Mov(x0, csp);
-
- ExternalReference check_stack_guard_state =
- ExternalReference::re_check_stack_guard_state(isolate());
- __ Mov(scratch, check_stack_guard_state);
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, scratch);
-
- // The input string may have been moved in memory, we need to reload it.
- __ Peek(input_start(), kPointerSize);
- __ Peek(input_end(), 2 * kPointerSize);
-
- DCHECK(csp.Is(__ StackPointer()));
- __ Drop(xreg_to_claim);
-
- // Reload the Code pointer.
- __ Mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ B(to);
- return;
- }
- if (to == NULL) {
- to = &backtrack_label_;
- }
- __ B(condition, to);
-}
-
-void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
- int immediate,
- Condition condition,
- Label* to) {
- if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
- if (to == NULL) {
- to = &backtrack_label_;
- }
- if (condition == eq) {
- __ Cbz(reg, to);
- } else {
- __ Cbnz(reg, to);
- }
- } else {
- __ Cmp(reg, immediate);
- BranchOrBacktrack(condition, to);
- }
-}
-
-
-void RegExpMacroAssemblerARM64::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ Mov(x10, stack_limit);
- __ Ldr(x10, MemOperand(x10));
- DCHECK(csp.Is(__ StackPointer()));
- __ Cmp(csp, x10);
- CallIf(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM64::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(isolate());
- __ Mov(x10, stack_limit);
- __ Ldr(x10, MemOperand(x10));
- __ Cmp(backtrack_stackpointer(), x10);
- CallIf(&stack_overflow_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM64::Push(Register source) {
- DCHECK(source.Is32Bits());
- DCHECK(!source.is(backtrack_stackpointer()));
- __ Str(source,
- MemOperand(backtrack_stackpointer(),
- -static_cast<int>(kWRegSize),
- PreIndex));
-}
-
-
-void RegExpMacroAssemblerARM64::Pop(Register target) {
- DCHECK(target.Is32Bits());
- DCHECK(!target.is(backtrack_stackpointer()));
- __ Ldr(target,
- MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
-}
-
-
-Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
- DCHECK(register_index < kNumCachedRegisters);
- return Register::Create(register_index / 2, kXRegSizeInBits);
-}
-
-
-Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
- Register maybe_result) {
- DCHECK(maybe_result.Is32Bits());
- DCHECK(register_index >= 0);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- Register result;
- RegisterState register_state = GetRegisterState(register_index);
- switch (register_state) {
- case STACKED:
- __ Ldr(maybe_result, register_location(register_index));
- result = maybe_result;
- break;
- case CACHED_LSW:
- result = GetCachedRegister(register_index).W();
- break;
- case CACHED_MSW:
- __ Lsr(maybe_result.X(), GetCachedRegister(register_index),
- kWRegSizeInBits);
- result = maybe_result;
- break;
- default:
- UNREACHABLE();
- break;
- }
- DCHECK(result.Is32Bits());
- return result;
-}
-
-
-void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
- Register source) {
- DCHECK(source.Is32Bits());
- DCHECK(register_index >= 0);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
-
- Register cached_register;
- RegisterState register_state = GetRegisterState(register_index);
- switch (register_state) {
- case STACKED:
- __ Str(source, register_location(register_index));
- break;
- case CACHED_LSW:
- cached_register = GetCachedRegister(register_index);
- if (!source.Is(cached_register.W())) {
- __ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
- }
- break;
- case CACHED_MSW:
- cached_register = GetCachedRegister(register_index);
- __ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
- Label skip_call;
- if (condition != al) __ B(&skip_call, NegateCondition(condition));
- __ Bl(to);
- __ Bind(&skip_call);
-}
-
-
-void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
- __ Pop(lr, xzr);
- __ Add(lr, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM64::SaveLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
- __ Sub(lr, lr, Operand(masm_->CodeObject()));
- __ Push(xzr, lr);
-}
-
-
-MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
- DCHECK(register_index < (1<<30));
- DCHECK(register_index >= kNumCachedRegisters);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- register_index -= kNumCachedRegisters;
- int offset = kFirstRegisterOnStack - register_index * kWRegSize;
- return MemOperand(frame_pointer(), offset);
-}
-
-MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
- Register scratch) {
- DCHECK(register_index < (1<<30));
- DCHECK(register_index < num_saved_registers_);
- DCHECK(register_index >= kNumCachedRegisters);
- DCHECK_EQ(register_index % 2, 0);
- register_index -= kNumCachedRegisters;
- int offset = kFirstCaptureOnStack - register_index * kWRegSize;
- // capture_location is used with Ldp/Stp instructions, which load/store a pair
- // of registers. The immediate field in the encoding is limited to 7 bits (signed).
- if (is_int7(offset)) {
- return MemOperand(frame_pointer(), offset);
- } else {
- __ Add(scratch, frame_pointer(), offset);
- return MemOperand(scratch);
- }
-}
-
-void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
-
- // The ldr, str, ldrh, strh instructions can do unaligned accesses if the CPU
- // and the operating system running on the target allow it.
- // If unaligned loads/stores are not supported, then this function must only
- // be used to load a single character at a time.
-
- // ARMv8 supports unaligned accesses but V8 or the kernel can decide to
- // disable it.
- // TODO(pielan): See whether or not we should disable unaligned accesses.
- if (!CanReadUnaligned()) {
- DCHECK(characters == 1);
- }
-
- if (cp_offset != 0) {
- if (masm_->emit_debug_code()) {
- __ Mov(x10, cp_offset * char_size());
- __ Add(x10, x10, Operand(current_input_offset(), SXTW));
- __ Cmp(x10, Operand(w10, SXTW));
- // The offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
- } else {
- __ Add(w10, current_input_offset(), cp_offset * char_size());
- }
- offset = w10;
- }
-
- if (mode_ == LATIN1) {
- if (characters == 4) {
- __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
- } else if (characters == 2) {
- __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
- } else {
- DCHECK(characters == 1);
- __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
- }
- } else {
- DCHECK(mode_ == UC16);
- if (characters == 2) {
- __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
- } else {
- DCHECK(characters == 1);
- __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
- }
- }
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
deleted file mode 100644
index ae4393f7ac..0000000000
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-
-#include "src/macro-assembler.h"
-
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
- int registers_to_save);
- virtual ~RegExpMacroAssemblerARM64();
- virtual void AbortedCodeGeneration() { masm_->AbortedCodeGeneration(); }
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and has a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame,
- int start_offset,
- const byte** input_start,
- const byte** input_end);
-
- private:
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Callee-saved registers x19-x29, where x29 is the old frame pointer.
- static const int kCalleeSavedRegisters = 0;
- // Return address.
- // It is placed above the 11 callee-saved registers.
- static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameter placed by caller.
- static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
- static const int kStackBase = kDirectCall - kPointerSize;
- static const int kOutputSize = kStackBase - kPointerSize;
- static const int kInput = kOutputSize - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessCounter = kInput - kPointerSize;
- // First position register address on the stack. Following positions are
- // below it. A position is a 32 bit value.
- static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
- // A capture is a 64 bit value holding two positions.
- static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // When initializing registers to a non-position value we can unroll
- // the loop. Set the limit of registers to unroll.
- static const int kNumRegistersToUnroll = 16;
-
- // We are using x0 to x7 as a register cache. Each hardware register must
- // contain one capture, that is two 32 bit registers. We can cache at most
- // 16 registers.
- static const int kNumCachedRegisters = 16;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // Location of a 32 bit position register.
- MemOperand register_location(int register_index);
-
- // Location of a 64 bit capture, combining two position registers.
- MemOperand capture_location(int register_index, Register scratch);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- Register current_input_offset() { return w21; }
-
- // The register containing the current character after LoadCurrentCharacter.
- Register current_character() { return w22; }
-
- // Register holding address of the end of the input string.
- Register input_end() { return x25; }
-
- // Register holding address of the start of the input string.
- Register input_start() { return x26; }
-
- // Register holding the offset from the start of the string where we should
- // start matching.
- Register start_offset() { return w27; }
-
- // Pointer to the output array's first element.
- Register output_array() { return x28; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- Register backtrack_stackpointer() { return x23; }
-
- // Register holding pointer to the current code object.
- Register code_pointer() { return x20; }
-
- // Register holding the value used for clearing capture registers.
- Register non_position_value() { return w24; }
- // The top 32 bits of this register are used to store this value
- // twice. This is used for clearing more than one register at a time.
- Register twice_non_position_value() { return x24; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Compares reg against immediate before calling BranchOrBacktrack.
- // It makes use of the Cbz and Cbnz instructions.
- void CompareAndBranchOrBacktrack(Register reg,
- int immediate,
- Condition condition,
- Label* to);
-
- inline void CallIf(Label* to, Condition condition);
-
- // Save and restore the link register on the stack in a way that
- // is GC-safe.
- inline void SaveLinkRegister();
- inline void RestoreLinkRegister();
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // This state indicates where the register actually is.
- enum RegisterState {
- STACKED, // Resides in memory.
- CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
- CACHED_MSW // Most Significant Word of a 64 bit hardware register.
- };
-
- RegisterState GetRegisterState(int register_index) {
- DCHECK(register_index >= 0);
- if (register_index >= kNumCachedRegisters) {
- return STACKED;
- } else {
- if ((register_index % 2) == 0) {
- return CACHED_LSW;
- } else {
- return CACHED_MSW;
- }
- }
- }
-
- // Store helper that takes the state of the register into account.
- inline void StoreRegister(int register_index, Register source);
-
- // Returns a hardware W register that holds the value of the capture
- // register.
- //
- // This function will try to use an existing cache register (w0-w7) for the
- // result. Otherwise, it will load the value into maybe_result.
- //
- // If the returned register is anything other than maybe_result, calling code
- // must not write to it.
- inline Register GetRegister(int register_index, Register maybe_result);
-
- // Returns the hardware register (x0-x7) holding the value of the capture
- // register.
- // This assumes that the state of the register is not STACKED.
- inline Register GetCachedRegister(int register_index);
-
- Isolate* isolate() const { return masm_->isolate(); }
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
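
The deleted header documents an x0-x7 register cache: the first 16 capture registers live two per hardware register, even indices in the low 32 bits and odd indices in the high 32 bits, with everything else spilled to the stack. A standalone sketch of that mapping (only GetRegisterState's logic is taken from the header; the surrounding driver is illustrative):

#include <cstdio>
#include <initializer_list>

enum RegisterState { STACKED, CACHED_LSW, CACHED_MSW };
static const int kNumCachedRegisters = 16;  // two 32-bit positions per x0..x7

RegisterState GetRegisterState(int register_index) {
  if (register_index >= kNumCachedRegisters) return STACKED;
  return (register_index % 2) == 0 ? CACHED_LSW : CACHED_MSW;
}

int main() {
  for (int i : {0, 1, 15, 16}) {
    RegisterState state = GetRegisterState(i);
    if (state == STACKED) {
      std::printf("register %d -> stacked\n", i);
    } else {
      // Cached capture registers share hardware register x(i / 2).
      std::printf("register %d -> %s of x%d\n", i,
                  state == CACHED_LSW ? "low word" : "high word", i / 2);
    }
  }
  return 0;
}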
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 29d3ea2419..83fd164bb6 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
-#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
@@ -223,6 +222,9 @@ int64_t Simulator::CallRegExp(byte* entry,
void Simulator::CheckPCSComplianceAndRun() {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
#ifdef DEBUG
CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
@@ -333,9 +335,15 @@ uintptr_t Simulator::PopAddress() {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return stack_limit_ + 1024;
}
@@ -1676,11 +1684,6 @@ void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
}
-void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
- LoadStorePairHelper(instr, Offset);
-}
-
-
void Simulator::LoadStorePairHelper(Instruction* instr,
AddrMode addrmode) {
unsigned rt = instr->Rt();
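
The StackLimit change above makes the simulator's JS stack limit track C-stack exhaustion. A free-standing sketch of that decision, with the simulator's state passed in as parameters (parameter names are illustrative; the real method reads them from the Simulator instance):

#include <cstdint>

// Mirrors the logic of the new Simulator::StackLimit(c_limit): if the C stack
// is already past its limit, report the simulator's current sp so the next JS
// stack check fails immediately; otherwise use the simulator's own JS stack
// limit plus a 1024-byte safety margin.
uintptr_t SimulatedStackLimit(uintptr_t c_limit, uintptr_t current_c_sp,
                              uintptr_t simulator_sp,
                              uintptr_t js_stack_limit) {
  if (current_c_sp < c_limit) return simulator_sp;
  return js_stack_limit + 1024;
}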
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 64fceb3451..6ff0013ebd 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -8,8 +8,6 @@
#include <stdarg.h>
#include <vector>
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@@ -268,7 +266,7 @@ class Simulator : public DecoderVisitor {
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
void ResetState();
@@ -403,7 +401,7 @@ class Simulator : public DecoderVisitor {
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
- Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+ Address get_sp() const { return reg<Address>(31, Reg31IsStackPointer); }
template<typename T>
T fpreg(unsigned code) const {
@@ -884,13 +882,14 @@ class Simulator : public DecoderVisitor {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-// See also 'class SimulatorStack' in arm/simulator-arm.h.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index eee614d288..da91fd5d60 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -6,7 +6,6 @@
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
-#include "src/v8.h"
#include "src/arm64/constants-arm64.h"