path: root/deps/v8/src/ppc
author     Ali Ijaz Sheikh <ofrobots@google.com>  2016-03-01 08:58:05 -0800
committer  Ali Sheikh <ofrobots@lemonhope.roam.corp.google.com>  2016-03-03 20:35:20 -0800
commit     069e02ab47656b3efd1b6829c65856b2e1c2d1db (patch)
tree       eb643e0a2e88fd64bb9fc927423458d2ae96c2db /deps/v8/src/ppc
parent     8938355398c79f583a468284b768652d12ba9bc9 (diff)
deps: upgrade to V8 4.9.385.18
Pick up the current branch head for V8 4.9
https://github.com/v8/v8/commit/1ecba0f

PR-URL: https://github.com/nodejs/node/pull/4722
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h             35
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc                57
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h                 33
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc               1772
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc              426
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h                 4
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                  44
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.h                    2
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h                 48
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc               7
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                   28
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc    59
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc         590
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h          147
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc               245
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h                 47
16 files changed, 2079 insertions, 1465 deletions
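
The dominant pattern in the diff below is that Assembler::set_target_address_at, the deserialization helpers, CodePatcher and RelocInfo now receive an explicit Isolate*, so instruction-cache flushes go through Assembler::FlushICache(isolate, ...) instead of the removed FlushICacheWithoutIsolate variant. The following is a minimal stand-alone sketch of that pattern only; Isolate, Address, FlushICache and SetTargetAddressAt here are simplified stand-ins for illustration, not V8's real declarations.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct Isolate {};  // stand-in; V8's Isolate owns the per-isolate (simulator) i-cache state

    using Address = uint8_t*;

    // Stand-in for Assembler::FlushICache(isolate, start, size): invalidate the
    // instruction cache for the patched range using isolate-owned state.
    static void FlushICache(Isolate* isolate, void* start, size_t size) {
      (void)isolate;
      (void)start;
      (void)size;
    }

    // The recurring change: a patching helper takes the Isolate explicitly
    // instead of relying on an isolate-free flush.
    static void SetTargetAddressAt(Isolate* isolate, Address pc, Address target) {
      std::memcpy(pc, &target, sizeof(target));  // rewrite the encoded target
      FlushICache(isolate, pc, sizeof(target));  // flush through the isolate
    }

    int main() {
      Isolate isolate;
      uint8_t buffer[sizeof(Address)] = {0};
      SetTargetAddressAt(&isolate, buffer, buffer);
      std::puts("patched");
      return 0;
    }
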
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 4f6a35d66e..b384d3f4f9 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -60,7 +60,7 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(pc_, host_, target + delta,
+ Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
@@ -136,7 +136,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -196,8 +197,9 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(
- pc_, host_, reinterpret_cast<Address>(target), icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -286,7 +288,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta, host_,
+ Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(),
icache_flush_mode);
}
@@ -300,7 +302,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -319,9 +321,10 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(pc_, host_, NULL, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+ SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -637,16 +640,16 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
Code* code = NULL;
- set_target_address_at(pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
@@ -654,8 +657,8 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
@@ -698,7 +701,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 5 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -713,7 +716,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 2 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index ac03ce6949..147fb59aae 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -453,7 +453,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_mov32(dst, offset);
break;
@@ -464,7 +464,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code((operands >> 21) & 0x1f);
Register base = Register::from_code((operands >> 16) & 0x1f);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_add32(dst, base, offset);
break;
@@ -472,7 +472,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool,
CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
@@ -480,7 +480,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
break;
}
case kUnboundJumpTableEntryOpcode: {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
patcher.masm()->dp(target_pos);
@@ -1844,7 +1844,10 @@ void Assembler::mtxer(Register src) {
}
-void Assembler::mcrfs(int bf, int bfa) {
+void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bf = cr.code();
+ int bfa = bit / CRWIDTH;
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
@@ -2163,6 +2166,18 @@ void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
@@ -2181,6 +2196,18 @@ void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -2195,6 +2222,20 @@ void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB0 | bt * B21 | rc);
+}
+
+
+void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB1 | bt * B21 | rc);
+}
+
+
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
@@ -2299,6 +2340,7 @@ void Assembler::GrowBuffer(int needed) {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2377,7 +2419,7 @@ void Assembler::EmitRelocations() {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
Code* code = NULL;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2387,7 +2429,8 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate(), pc, code, buffer_ + pos,
+ SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 36843c17ab..e84d695251 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -56,8 +56,11 @@
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
(!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
-#define ABI_TOC_ADDRESSABILITY_VIA_IP \
- (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#define ABI_CALL_VIA_IP 1
+#else
+#define ABI_CALL_VIA_IP 0
+#endif
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER Register::kCode_r2
@@ -457,17 +460,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
- Address pc, Code* code, Address target,
+ Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -481,11 +485,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -982,7 +987,7 @@ class Assembler : public AssemblerBase {
void mtlr(Register src);
void mtctr(Register src);
void mtxer(Register src);
- void mcrfs(int bf, int bfa);
+ void mcrfs(CRegister cr, FPSCRBit bit);
void mfcr(Register dst);
#if V8_TARGET_ARCH_PPC64
void mffprd(Register dst, DoubleRegister src);
@@ -1050,17 +1055,27 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctidz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fneg(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
+ void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
@@ -1164,7 +1179,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 9b3a3fb9ad..0476cd27e1 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -21,9 +21,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- r4 : called function
+ // -- r4 : target
+ // -- r6 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -35,37 +34,29 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r4);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r4);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r6);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r4, r6);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects r3 to contain the number of arguments
- // including the receiver and the extra arguments. But r3 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
- __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(ne, r3, r5, r3);
- } else {
- Label skip;
- __ beq(&skip);
- __ mr(r3, r5);
- __ bind(&skip);
- }
+ // including the receiver and the extra arguments.
__ addi(r3, r3, Operand(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
@@ -75,31 +66,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result,
- FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ LoadP(result,
- MemOperand(result, Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result,
- FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ LoadP(
- result,
- MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -161,6 +136,110 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ Ret(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r6 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r5, MemOperand(sp, r5));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r5, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r5 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r5, &done_convert);
+ __ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
+ __ beq(&done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6);
+ __ mr(r3, r5);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r5, r3);
+ __ Pop(r4, r6);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
+ }
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -212,7 +291,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(r3);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -222,13 +301,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
- // -- r6 : original constructor
+ // -- r6 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r5 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -245,7 +327,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure r5 is a string.
+ // 3. Make sure r5 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(r5, &convert);
@@ -264,69 +346,43 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- r5 : the first argument
- // -- r4 : constructor function
- // -- r6 : original constructor
- // -- lr : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r4, r6);
- __ bne(&rt_call);
-
- __ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in r3.
- __ LoadGlobalFunctionInitialMap(r4, r6, r7);
- __ StoreP(r6, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ LoadSmiLiteral(r6, Smi::FromInt(JSValue::kSize));
- __ Push(r4, r5, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(r4, r5);
- }
- __ b(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r5, r4, r6); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(r4, r5);
- }
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
}
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r4 : target function (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -----------------------------------
+
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ Push(r4, r4);
+ __ Push(r4, r6, r4);
__ CallRuntime(function_id, 1);
- // Restore reciever.
- __ Pop(r4);
+ // Restore target function and new target.
+ __ Pop(r4, r6);
}
@@ -364,12 +420,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -382,187 +439,175 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r5, r7);
- __ SmiTag(r3);
- __ Push(r5, r3, r4, r6);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ cmpi(r5, Operand::Zero());
- __ bne(&rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
- __ bne(&rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r6: original constructor
- __ LoadP(r5,
- FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r8, r7, MAP_TYPE);
- __ bne(&rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
- __ cmp(r4, r8);
- __ bne(&rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r4: constructor function
- // r5: initial map
- __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
- __ beq(&rt_call);
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwz(r7, bit_field3);
- __ DecodeField<Map::Counter>(r11, r7);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&allocate);
- // Decrease generous allocation count.
- __ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
- __ stw(r7, bit_field3);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ bne(&allocate);
-
- __ Push(r4, r5, r5); // r5 = initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(r4, r5);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r4: constructor function
- // r5: initial map
- Label rt_call_reload_new_target;
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-
- __ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ mr(r8, r7);
- __ StoreP(r5, MemOperand(r8, JSObject::kMapOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kPropertiesOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kElementsOffset));
- __ addi(r8, r8, Operand(JSObject::kElementsOffset + kPointerSize));
-
- __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2));
- __ add(r9, r7, r9); // End of object.
-
- // Fill all the in-object properties with the appropriate filler.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- // r8: First in-object property of JSObject (not tagged)
- // r9: End of object
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ lbz(
- r3,
- FieldMemOperand(
- r5, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbz(r5, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ sub(r3, r3, r5);
- if (FLAG_debug_code) {
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ add(r0, r8, r0);
- // r0: offset of first field after pre-allocated fields
- __ cmp(r0, r9);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- {
- Label done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&done);
- __ InitializeNFieldsWithFiller(r8, r3, r10);
- __ bind(&done);
+ if (!create_implicit_receiver) {
+ __ SmiTag(r7, r3, SetRC);
+ __ Push(r5, r7);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else {
+ __ SmiTag(r3);
+ __ Push(r5, r3);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
+ __ bne(&rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r6: new target
+ __ LoadP(r5,
+ FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &rt_call);
+ __ CompareObjectType(r5, r8, r7, MAP_TYPE);
+ __ bne(&rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r4, r8);
+ __ bne(&rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
+ __ beq(&rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+
+ __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ // r7: JSObject (not HeapObject tagged - the actual address).
+ // r10: start of next object
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
+ __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ addi(r7, r7, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r7: JSObject (tagged)
+ // r8: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwz(r3, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(r11, r3);
+ // r11: slack tracking counter
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&no_inobject_slack_tracking);
+ // Decrease generous allocation count.
+ __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
+ __ stw(r3, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ sub(r3, r10, r3);
+ // r3: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r8, r3);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(r8, r3, r9);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r8, r10, r9);
+
+ // r11: slack tracking counter value before decreasing.
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ bne(&allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(r4, r6, r7, r5);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r4, r6, r7);
+
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- // To allow for truncation.
- __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
+ __ InitializeFieldsWithFiller(r8, r10, r9);
- __ InitializeFieldsWithFiller(r8, r9, r10);
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
+ }
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ addi(r7, r7, Operand(kHeapObjectTag));
+ // Allocate the new receiver object using the runtime call.
+ // r4: constructor function
+ // r6: new target
+ __ bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r4, r6, r4, r6);
+ __ CallRuntime(Runtime::kNewObject);
+ __ mr(r7, r3);
+ __ Pop(r4, r6);
- // Continue with JSObject being successfully allocated
+ // Receiver for constructor call allocated.
+ // r4: constructor function
+ // r6: new target
// r7: JSObject
- __ b(&allocated);
-
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ LoadP(r6, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Allocate the new receiver object using the runtime call.
- // r4: constructor function
- // r6: original constructor
- __ bind(&rt_call);
- __ Push(r4, r6); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mr(r7, r3);
+ __ bind(&allocated);
- // Receiver for constructor call allocated.
- // r7: JSObject
- __ bind(&allocated);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r3, MemOperand(sp));
+ __ SmiUntag(r3, SetRC);
- // Restore the parameters.
- __ Pop(r4, ip);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r6, MemOperand(sp));
-
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ Push(ip, r7, r7);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r7, r7);
+ }
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
// r4: constructor function
// r5: address of last argument (caller sp)
- // r6: number of arguments (smi-tagged)
+ // r6: new target
+ // cr0: condition indicating whether r3 is zero
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
- __ SmiUntag(r3, r6, SetRC);
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ sub(sp, sp, ip);
@@ -577,57 +622,60 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
// r3: number of arguments
// r4: constructor function
+ // r6: new target
if (is_api_function) {
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r3: result
// sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r3: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r3, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r3, r4, r6, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r3: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r3: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r3, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r3, r4, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r3, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r3: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ LoadP(r4, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -635,104 +683,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ }
__ blr();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- r5 : allocation site or undefined
- // -- r6 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(r5, r7);
-
- // Smi-tagged arguments count.
- __ mr(r7, r3);
- __ SmiTag(r7, SetRC);
-
- // receiver is the hole.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-
- // allocation site, smi arguments count, new.target, receiver
- __ Push(r5, r7, r6, ip);
-
- // Set up pointer to last argument.
- __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // r3: number of arguments
- // r4: constructor function
- // r5: address of last argument (caller sp)
- // r7: number of arguments (smi-tagged)
- // cr0: compare against zero of arguments
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, no_args;
- __ beq(&no_args, cr0);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ mtctr(r3);
- __ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r5, ip));
- __ push(r0);
- __ bdnz(&loop);
- __ bind(&no_args);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ and_(r0, r5, r5, SetRC);
- __ beq(&skip_step_in, cr0);
-
- __ Push(r3, r4, r4);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(r3, r4);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // r3: number of arguments
- // r4: constructor function
- ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Restore context from the frame.
- // r3: result
- // sp[0]: number of arguments (smi-tagged)
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Get arguments count, skipping over new.target.
- __ LoadP(r4, MemOperand(sp, kPointerSize));
- // Leave construct frame.
- }
-
- __ SmiToPtrArrayOffset(r4, r4);
- __ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kPointerSize));
- __ blr();
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -761,7 +737,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bgt(&okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -863,6 +839,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o r4: the JS function object being called.
+// o r6: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
@@ -880,6 +857,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r4);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ push(r6);
+
+ // Push zero for bytecode array offset.
+ __ li(r3, Operand::Zero());
+ __ push(r3);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -908,7 +890,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ cmpl(r6, r0);
__ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -938,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(sp, r0);
__ bge(&ok);
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -946,9 +928,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ subi(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -965,6 +946,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+ __ bkpt(0); // Does not return here.
}
@@ -1025,13 +1007,14 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
- // -- r6 : original constructor
+ // -- r6 : new target
// -- r4 : constructor to call
// -- r5 : address of the first argument
// -----------------------------------
// Push a slot for the receiver to be constructed.
- __ push(r3);
+ __ li(r0, Operand::Zero());
+ __ push(r0);
// Push the arguments (skip if none).
Label skip;
@@ -1041,40 +1024,105 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ bind(&skip);
// Call the constructor with r3, r4, and r6 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save accumulator register and pass the deoptimization type to
+ // the runtime system.
+ __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+ __ Push(kInterpreterAccumulatorRegister, r4);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ LoadP(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ LoadP(r4,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ LoadP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(r4, r4);
- // Whether to compile in a background thread.
- __ LoadRoot(
- r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(r0);
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(r4);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1093,15 +1141,16 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
__ Jump(ip);
@@ -1134,16 +1183,17 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
@@ -1177,7 +1227,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1203,7 +1253,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r9.
@@ -1243,6 +1293,111 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers registers {r7, r8, r9, r10}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = r7;
+ Register map = r8;
+ Register constructor = r9;
+ Register scratch = r10;
+
+ // If there is no signature, return the holder.
+ __ LoadP(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ __ cmpi(scratch, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ bne(&next_prototype);
+ Register type = constructor;
+ __ LoadP(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(type,
+ FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(signature, type);
+ __ beq(&receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ bne(&next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ LoadP(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ b(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+ __ beq(receiver_check_failed, cr0);
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments excluding receiver
+ // -- r4 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+
+ // Load the FunctionTemplateInfo.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ShiftLeftImm(r11, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r5, MemOperand(sp, r11));
+ CompatibleReceiverCheck(masm, r5, r6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ LoadP(r7, FieldMemOperand(r6, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(r7, FieldMemOperand(r7, CallHandlerInfo::kFastHandlerOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver);
+ __ addi(r11, r11, Operand(kPointerSize));
+ __ add(sp, sp, r11);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
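Note: the two additions above implement the receiver check for API functions with a signature — walk the receiver's hidden-prototype chain and, for each constructor found, the chain of parent FunctionTemplateInfos, looking for the signature, then jump to the fast call handler. A rough TypeScript model of that walk (field names here are illustrative stand-ins, not V8 data structures):

    interface TemplateInfo {
      signature?: TemplateInfo;   // undefined means "accept any receiver"
      parent?: TemplateInfo;      // chain of inheriting function templates
    }
    interface HeapObj {
      template?: TemplateInfo;    // template behind the map's constructor, if any
      hiddenPrototype?: HeapObj;  // next object on the hidden prototype chain
    }

    function receiverIsCompatible(receiver: HeapObj, info: TemplateInfo): boolean {
      if (info.signature === undefined) return true;  // no signature: holder is fine
      for (let obj: HeapObj | undefined = receiver; obj !== undefined;
           obj = obj.hiddenPrototype) {
        // Loop through the chain of inheriting function templates.
        for (let t = obj.template; t !== undefined; t = t.parent) {
          if (t === info.signature) return true;      // compatible receiver
        }
      }
      return false;  // falls through to Runtime::kThrowIllegalInvocation
    }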
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1250,7 +1405,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1298,7 +1453,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ bge(&ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1309,7 +1464,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r3 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r3);
+ __ JumpIfSmi(r3, &receiver_not_date);
+ __ CompareObjectType(r3, r4, r5, JS_DATE_TYPE);
+ __ bne(&receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ LoadP(r4, MemOperand(r4));
+ __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
+ __ cmp(r4, ip);
+ __ bne(&stamp_mismatch);
+ __ LoadP(r3, FieldMemOperand(
+ r3, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
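The fast path above is a cache keyed on the isolate's date cache stamp; when the stamp stored on the JSDate object is stale, the builtin calls back into C++ to recompute. A small TypeScript sketch of the same caching idea (indices and names are assumptions for illustration only):

    const DATE_VALUE = 0;          // stand-in for JSDate::kDateValue
    let globalDateCacheStamp = 0;  // bumped when time zone / DST data changes

    interface CachedDate {
      value: number;      // ms since the epoch
      stamp: number;      // stamp at the time the fields below were computed
      fields: number[];   // cached year, month, day, ...
    }

    function getDateField(date: CachedDate, fieldIndex: number): number {
      if (fieldIndex === DATE_VALUE) return date.value;  // raw value, never cached
      if (date.stamp !== globalDateCacheStamp) {
        // Stamp mismatch: recompute (stand-in for the get_date_field C helper).
        const d = new Date(date.value);
        date.fields = [d.getFullYear(), d.getMonth(), d.getDate()];
        date.stamp = globalDateCacheStamp;
      }
      return date.fields[fieldIndex - 1];
    }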
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r4, argArray into r3 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r3);
+ __ LoadP(r4, MemOperand(new_sp, 0)); // receiver
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- r3 : argArray
+ // -- r4 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r4, &receiver_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&receiver_not_callable, cr0);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ li(r3, Operand::Zero());
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
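In JS terms, the stack shuffle above makes the receiver the callee, thisArg the new receiver, and treats a null/undefined argArray as a zero-argument call. A rough TypeScript sketch of the observable behaviour (not V8 internals):

    function functionPrototypeApply(receiver: unknown, thisArg?: unknown,
                                    argArray?: unknown): unknown {
      if (typeof receiver !== "function") {
        // 4c: receiver is not callable.
        throw new TypeError("Function.prototype.apply called on non-callable");
      }
      if (argArray === null || argArray === undefined) {
        return receiver.call(thisArg);                       // 4b: no arguments
      }
      // 4a: unpack argArray, as the shared Apply builtin does.
      return receiver.apply(thisArg, Array.from(argArray as ArrayLike<unknown>));
    }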
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r3: actual number of arguments
{
@@ -1354,185 +1629,144 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ b(&entry);
- __ bind(&loop);
- __ LoadP(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- __ LoadP(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ push(r3);
-
- // Update the index on the stack and in register key.
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ AddSmiLiteral(key, key, Smi::FromInt(1), r0);
- __ StoreP(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ LoadP(r0, MemOperand(fp, limitOffset));
- __ cmp(key, r0);
- __ bne(&loop);
-
- // On exit, the pushed arguments count is in r3, untagged
- __ SmiUntag(r3, key);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ LoadP(r4, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ Push(r3, r4);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r4);
+ __ mr(r3, r4);
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ beq(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ LoadP(r5, MemOperand(fp, kReceiverOffset));
- __ Push(r3, r4, r5); // limit, initial index and receiver.
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r4, &target_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&target_not_callable, cr0);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
}
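Reflect.apply differs from Function.prototype.apply only in how the operands arrive (positionally, with the receiver discarded); the tail of the builtin is the same shared Apply stub. An illustrative TypeScript sketch:

    function reflectApply(target: unknown, thisArgument: unknown,
                          argumentsList: unknown): unknown {
      if (typeof target !== "function") {
        throw new TypeError("Reflect.apply target is not callable");  // 3b
      }
      // No null/undefined special case here: a missing argumentsList reaches
      // the list-materialization step, which throws for it.
      return target.apply(thisArgument,
                          Array.from(argumentsList as ArrayLike<unknown>));
    }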
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // new.target into r6 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bne(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ StoreP(r3, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(r3);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ Push(r3, r4); // limit and initial index.
- // Push the constructor function as callee
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ push(r3);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ LoadP(r7, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(r3, r4);
+ __ mr(r6, r4);
+ __ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ mr(r6, r4); // new.target defaults to target
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ bind(&skip);
+ __ mr(sp, new_sp);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
-}
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r6 : new.target
+ // -- r4 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r4, &target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&target_not_constructor, cr0);
+  // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r6, &new_target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&new_target_not_constructor, cr0);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ StoreP(r6, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
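The register shuffle above implements Reflect.construct's defaulting rule: when new.target is not supplied it falls back to the target, and both must be constructors. A rough sketch (the real builtin checks the map's constructor bit rather than typeof):

    type Ctor = new (...args: unknown[]) => unknown;

    function reflectConstruct(target: unknown, argumentsList: unknown[],
                              ...rest: unknown[]): unknown {
      const newTarget = rest.length === 0 ? target : rest[0];
      if (typeof target !== "function" || typeof newTarget !== "function") {
        throw new TypeError("Reflect.construct requires constructors");  // 4b / 4c
      }
      // Delegates to the engine primitive; the builtin tail-calls Apply.
      return Reflect.construct(target as Ctor, argumentsList, newTarget as Ctor);
    }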
@@ -1542,6 +1776,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1588,6 +1823,131 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r3, &create_runtime);
+
+ // Load the map of argumentsList into r5.
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+
+ // Load native context into r7.
+ __ LoadP(r7, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+ __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
+ __ beq(&create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6, r3);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r4, r6);
+ __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
+ }
+ __ b(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ LoadP(r5, FieldMemOperand(
+ r3, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
+ __ cmp(r5, ip);
+ __ bne(&create_runtime);
+ __ SmiUntag(r5);
+ __ mr(r3, r7);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmpi(r5, Operand(FAST_ELEMENTS));
+ __ bgt(&create_runtime);
+ __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ beq(&create_runtime);
+ __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ SmiUntag(r5);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ sub(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ cmp(ip, r0); // Signed comparison.
+ __ bgt(&done);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r4 : target
+ // -- r3 : args (a FixedArray built from argumentsList)
+ // -- r5 : len (number of elements to push from args)
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label loop, no_args;
+ __ cmpi(r5, Operand::Zero());
+ __ beq(&no_args);
+ __ addi(r3, r3,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ mtctr(r5);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r3, kPointerSize));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+ __ mr(r3, r5);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
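The shared Apply builtin above does four things: materialize argumentsList as a flat list (fast paths for unmodified arguments objects and fast-elements JSArrays, a runtime call otherwise), verify there is stack room for `len` slots, push the elements, then tail-call Call or Construct depending on whether new.target is undefined. Roughly, in TypeScript:

    type Ctor = new (...args: unknown[]) => unknown;

    function applyBuiltin(target: Function, argumentsList: ArrayLike<unknown>,
                          newTarget?: Ctor): unknown {
      // Stand-in for the arguments-object / fast-array / runtime fast paths.
      const args = Array.from(argumentsList);
      // (The real builtin throws a stack-overflow error here if args.length
      //  slots would not fit below the real stack limit.)
      return newTarget === undefined
          ? target(...args)                                    // dispatch to Call
          : Reflect.construct(target as unknown as Ctor, args, newTarget);
    }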
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1679,17 +2039,128 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
#if !V8_TARGET_ARCH_PPC64
__ SmiUntag(r5);
#endif
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount actual(r3);
ParameterCount expected(r5);
- __ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r5 and length of that into r7.
+ Label no_bound_arguments;
+ __ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
+ __ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r7, SetRC);
+ __ beq(&no_bound_arguments, cr0);
+ {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r5 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -- r7 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ mr(r9, sp); // preserve previous stack pointer
+ __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, r10);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ bgt(&done); // Signed comparison.
+ // Restore the stack pointer.
+ __ mr(sp, r9);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r9 : the previous stack pointer
+ // -- r10: the size of the [[BoundArguments]]
+ {
+ Label skip, loop;
+ __ li(r8, Operand::Zero());
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&skip);
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ LoadPX(r0, MemOperand(r9, r8));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ bind(&skip);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, r10);
+ __ mtctr(r7);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r5, -kPointerSize));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ add(r3, r3, r7);
+ }
}
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Patch the receiver to [[BoundThis]].
+ __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(ip, MemOperand(sp, r0));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
}
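For a bound function call, the builtin overwrites the receiver slot with [[BoundThis]], pushes the [[BoundArguments]] below the call-site arguments (relocating the existing ones first), and tail-calls the [[BoundTargetFunction]]. The JS-level effect, as a sketch:

    interface BoundFn {
      boundThis: unknown;
      boundArgs: unknown[];
      target: Function;          // [[BoundTargetFunction]]
    }

    function callBoundFunction(bound: BoundFn, ...callArgs: unknown[]): unknown {
      // [[BoundArguments]] go first, then whatever the caller passed.
      return bound.target.apply(bound.boundThis,
                                [...bound.boundArgs, ...callArgs]);
    }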
@@ -1706,14 +2177,20 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(r4);
- __ b(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r4);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ addi(r3, r3, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1726,7 +2203,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1736,7 +2213,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1746,10 +2223,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (checked to be a JSFunction)
- // -- r6 : the original constructor (checked to be a JSFunction)
+ // -- r6 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r4);
- __ AssertFunction(r6);
// Calling convention for function specific ConstructStubs require
// r5 to contain either an AllocationSite or undefined.
@@ -1765,17 +2241,51 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -- r6 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ Label skip;
+ __ cmp(r4, r6);
+ __ bne(&skip);
+ __ LoadP(r6,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
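Constructing a bound function prepends the same [[BoundArguments]]; the one extra rule, visible in the cmp/bne above, is that new.target is redirected to the bound target only when it still equals the bound function itself. A sketch:

    type Ctor = new (...args: unknown[]) => unknown;

    interface BoundCtor {
      boundArgs: unknown[];
      target: Ctor;              // [[BoundTargetFunction]]
    }

    function constructBoundFunction(bound: BoundCtor, newTarget: unknown,
                                    ...callArgs: unknown[]): unknown {
      const nt = newTarget === bound ? bound.target : (newTarget as Ctor);
      return Reflect.construct(bound.target,
                               [...bound.boundArgs, ...callArgs], nt);
    }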
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (checked to be a JSFunctionProxy)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r4 : the constructor to call (checked to be a JSProxy)
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r4, r6);
+ // Include the pushed new_target, constructor and the receiver.
+ __ addi(r3, r3, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1784,23 +2294,32 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (can be any Object)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r4, &non_constructor);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::kIsConstructor, r0);
__ beq(&non_constructor, cr0);
- // Dispatch based on instance type.
- __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1810,7 +2329,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1818,11 +2337,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1831,11 +2347,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
@@ -1847,31 +2362,34 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
- // Calculate copy start address into r3 and copy end address into r6.
+ // Calculate copy start address into r3 and copy end address into r7.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, r3, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, r3, r7);
// Copy the arguments (including the receiver) to the new stack frame.
// r3: copy start address
// r4: function
// r5: expected number of arguments
- // r6: copy end address
+ // r6: new target (passed through to callee)
+ // r7: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
- __ cmp(r3, r6); // Compare before moving to next argument.
+ __ cmp(r3, r7); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kPointerSize));
__ bne(&copy);
@@ -1902,16 +2420,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -1920,6 +2440,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: copy start address
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
Label copy;
__ bind(&copy);
@@ -1933,18 +2454,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, fp, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, fp, r7);
// Adjust for frame.
- __ subi(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
- __ cmp(sp, r6);
+ __ cmp(sp, r7);
__ bne(&fill);
}
@@ -1953,6 +2475,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mr(r3, r5);
// r3 : expected number of arguments
// r4 : function (passed through to callee)
+ // r6 : new target (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
@@ -1972,8 +2495,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
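The adaptor frame built above exists so the callee's frame always holds exactly `expected` argument slots: surplus actuals stay behind in the adaptor frame, missing ones are filled with undefined by the `fill` loop. The effect on the formal parameters, sketched in TypeScript:

    function adaptArguments(actual: unknown[], expected: number): unknown[] {
      // "Enough" path: copy only the slots the callee declared.
      const adapted = actual.slice(0, expected);
      // "Too few" path: pad the remaining formals with undefined.
      while (adapted.length < expected) adapted.push(undefined);
      return adapted;
    }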
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 92501a4a23..26fbe98cf9 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -260,7 +260,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
- __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ beq(&heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -456,11 +456,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r5 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
__ blt(&first_non_object);
// Return non-zero (r3 is not zero)
@@ -473,7 +473,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
__ cmpi(r5, Operand(ODDBALL_TYPE));
__ beq(&return_not_equal);
- __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
__ bge(&return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -536,9 +536,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(not_both_strings);
- __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
__ blt(not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -708,8 +708,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@@ -723,9 +722,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -948,7 +946,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1093,16 +1091,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Call C built-in.
__ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+ Register target = r15;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// Native AIX/PPC64 Linux use a function descriptor.
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
__ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- Register target = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ target = ip;
+#elif ABI_CALL_VIA_IP
__ Move(ip, r15);
- Register target = ip;
-#else
- Register target = r15;
+ target = ip;
#endif
// To let the GC traverse the return address of the exit frames, we need to
@@ -1424,15 +1421,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
__ bne(&slow_case, cr0);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ LoadP(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(scratch, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch, SharedFunctionInfo::kBoundBit, r0);
- __ bne(&slow_case, cr0);
-
// Get the "prototype" (or initial map) of the {function}.
__ LoadP(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1457,29 +1445,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ Register const result = r3;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ LoadP(object_prototype,
- FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
+ __ bne(&fast_runtime_fallback, cr0);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ beq(&fast_runtime_fallback);
+
+ __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ beq(&done);
- __ cmp(object_prototype, null);
- __ LoadP(object_map,
- FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ cmp(object, null);
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ bne(&loop);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ LoadSmiLiteral(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
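The rewritten fast path above walks the prototype chain inline and bails out to the runtime (Runtime::kHasInPrototypeChain) only for proxies or access-checked objects, where fetching the prototype can have side effects. The plain-object walk corresponds to:

    function hasInPrototypeChain(object: unknown,
                                 functionPrototype: object): boolean {
      let o: unknown = object;
      while (o !== null && (typeof o === "object" || typeof o === "function")) {
        const proto = Object.getPrototypeOf(o);  // a Proxy would run its trap here
        if (proto === functionPrototype) return true;
        o = proto;
      }
      return false;
    }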
@@ -1584,7 +1590,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r4);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1612,7 +1618,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1621,8 +1627,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 : number of parameters (tagged)
// r6 : parameters pointer
// Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
@@ -1693,7 +1699,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -1703,9 +1709,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
+ __ LoadP(r7, NativeContextMemOperand());
__ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
@@ -1856,7 +1860,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r8 = argument count (tagged)
__ bind(&runtime);
__ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1875,7 +1879,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1920,12 +1924,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
- __ LoadP(
- r7,
- MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
__ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
@@ -1972,7 +1971,30 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+ // r7 : rest parameter index (tagged)
+
+ Label runtime;
+ __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r0, r5);
+ __ add(r6, r8, r0);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ Push(r5, r6, r7);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
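The new rest-parameter stub only patches the argument count and parameter pointer when the caller went through an arguments-adaptor frame; building the actual array is left to Runtime::kNewRestParam. Its observable result is just a slice of the actual arguments, e.g.:

    function collectRestParameters(actualArgs: unknown[],
                                   restIndex: number): unknown[] {
      // Arguments at and beyond the rest parameter's position become the rest
      // array; when fewer arguments were passed, the result is simply [].
      return actualArgs.slice(restIndex);
    }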
@@ -1981,7 +2003,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2276,7 +2298,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ beq(&runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2365,7 +2387,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2408,35 +2430,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
- if (is_super) {
- __ Push(r6, r5, r4, r3, r7);
- } else {
- __ Push(r6, r5, r4, r3);
- }
+ __ Push(r6, r5, r4, r3);
__ CallStub(stub);
- if (is_super) {
- __ Pop(r6, r5, r4, r3, r7);
- } else {
- __ Pop(r6, r5, r4, r3);
- }
+ __ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2444,7 +2456,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2486,7 +2497,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bne(&miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
@@ -2510,7 +2521,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&not_array_function);
@@ -2518,13 +2529,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2534,7 +2545,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2543,35 +2553,29 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
-
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ GenerateRecordCallTarget(masm);
- __ AssertUndefinedOrAllocationSite(r5, r8);
- }
-
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mr(r6, r7);
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
} else {
- __ mr(r6, r4);
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
}
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+
+ // Pass function as new target.
+ __ mr(r6, r4);
+
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
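
The SmiToPtrArrayOffset/add pair used above turns a Smi-tagged slot index into a byte offset into the feedback vector. A sketch of that arithmetic follows, assuming the 64-bit Smi layout where the integer value lives in the upper 32 bits; the constants are illustrative rather than copied from V8 headers.

// How a Smi-tagged index becomes a scaled byte offset in one shift.
#include <cstdint>
#include <cstdio>

constexpr int kSmiShift = 32;        // value is stored as (value << 32)
constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers
constexpr int kHeaderSize = 16;      // assumed FixedArray header size

int64_t SmiToPtrArrayOffset(int64_t tagged_slot) {
  // Untag the index and scale it by the pointer size in a single
  // arithmetic right shift.
  return tagged_slot >> (kSmiShift - kPointerSizeLog2);
}

int main() {
  int64_t slot_index = 5;
  int64_t tagged = slot_index << kSmiShift;
  // Byte offset of vector element 5, relative to the vector's start.
  int64_t offset = SmiToPtrArrayOffset(tagged) + kHeaderSize;
  std::printf("element offset = %lld bytes\n", static_cast<long long>(offset));
  return 0;
}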
@@ -2590,7 +2594,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r6 - slot id
// r5 - vector
// r7 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(miss);
@@ -2615,11 +2619,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2656,9 +2656,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
__ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
- __ bind(&call);
+ __ bind(&call_function);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2692,14 +2693,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- // We have to update statistics for runtime profiling.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
- __ LoadP(r7, FieldMemOperand(r5, generic_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ b(&call);
+
+ __ bind(&call);
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2712,14 +2710,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ cmp(r4, r7);
__ beq(&miss);
- // Update stats.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ // Make sure the function belongs to the same native context.
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(ip, NativeContextMemOperand());
+ __ cmp(r7, ip);
+ __ bne(&miss);
// Initialize the call counter.
__ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -2737,7 +2737,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r4);
}
- __ b(&call);
+ __ b(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
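
The same-native-context check added above only lets a function be cached monomorphically when its native context matches the one the call site lives in. A rough model of that comparison, with made-up struct fields standing in for the context slots:

// Illustrative types only; the fields stand in for JSFunction::kContextOffset
// and the NATIVE_CONTEXT_INDEX slot read by the generated code.
#include <cassert>

struct NativeContext {};

struct Context {
  NativeContext* native;
};

struct JSFunctionModel {
  Context* context;
};

bool SameNativeContext(const JSFunctionModel& callee,
                       const Context& current_context) {
  return callee.context->native == current_context.native;
}

int main() {
  NativeContext main_realm, other_realm;
  Context current{&main_realm}, foreign{&other_realm};
  JSFunctionModel local{&current}, cross_realm{&foreign};
  assert(SameNativeContext(local, current));          // may be cached
  assert(!SameNativeContext(cross_realm, current));   // goes to miss
  return 0;
}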
@@ -2755,7 +2755,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -2814,11 +2814,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2845,7 +2845,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -2885,7 +2885,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -3139,7 +3139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r3: original string
@@ -3179,7 +3179,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ blr();
__ bind(&slow_string);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3190,7 +3190,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3212,7 +3212,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3242,7 +3242,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3397,7 +3397,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r4, r3);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3439,7 +3439,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
@@ -3723,9 +3723,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3733,16 +3733,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r5, r4, r3);
__ JumpIfSmi(r5, &miss);
- __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
- __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
DCHECK(GetCondition() == eq);
__ sub(r3, r3, r4);
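
The rewritten check relies on JS receiver types sitting at the top of the instance-type range, so "is a receiver" collapses to a single lower-bound comparison instead of per-type equality tests. A small sketch with invented numeric values:

// The numeric values below are illustrative, not V8's; only the ordering
// guaranteed by the STATIC_ASSERT above matters.
#include <cassert>

enum InstanceType : int {
  HEAP_NUMBER_TYPE = 10,
  STRING_TYPE = 20,
  FIRST_JS_RECEIVER_TYPE = 100,
  JS_PROXY_TYPE = 100,
  JS_OBJECT_TYPE = 110,
  JS_FUNCTION_TYPE = 120,
  LAST_TYPE = 120  // == LAST_JS_RECEIVER_TYPE
};

bool IsJSReceiver(InstanceType t) {
  // One comparison instead of checking individual object types.
  return t >= FIRST_JS_RECEIVER_TYPE;
}

int main() {
  assert(IsJSReceiver(JS_OBJECT_TYPE));
  assert(IsJSReceiver(JS_PROXY_TYPE));
  assert(!IsJSReceiver(STRING_TYPE));
  return 0;
}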
@@ -3753,7 +3754,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r5, r4, r3);
@@ -3770,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r3, r3, r4);
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3778,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ LoadSmiLiteral(r5, Smi::FromInt(LESS));
}
__ Push(r4, r3, r5);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3794,7 +3795,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
__ push(r0);
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -3825,7 +3826,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
__ LoadP(ip, MemOperand(target, 0)); // Instruction address
#else
// ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_TOC_ADDRESSABILITY_VIA_IP.
+ // for ABI_CALL_VIA_IP.
__ Move(ip, target);
#endif
@@ -4242,11 +4243,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4266,75 +4267,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : element value to store
- // -- r6 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers r3, r5, r7
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
- __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
-
- __ CheckFastElements(r5, r8, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r3, &smi_element);
- __ CheckFastSmiElements(r5, r8, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r4, r6, r3);
- __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
- __ Push(r8, r7);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
-#if V8_TARGET_ARCH_PPC64
- // add due to offset alignment requirements of StorePU
- __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ StoreP(r3, MemOperand(r9));
-#else
- __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
-#endif
- // Update the write barrier for the array store.
- __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
- __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4865,7 +4797,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Function descriptor
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
__ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif ABI_CALL_VIA_IP
// ip set above, so nothing to do.
#endif
@@ -5074,7 +5006,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor
// -- r5 : AllocationSite or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5095,6 +5027,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7);
}
+ // Enter the context of the Array function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
Label subclassing;
__ cmp(r6, r4);
__ bne(&subclassing);
@@ -5114,25 +5049,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- __ push(r4);
- __ push(r6);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ addi(r3, r3, Operand(2));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ addi(r3, r3, Operand(3));
break;
case NONE:
- __ li(r3, Operand(2));
+ __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
+ __ li(r3, Operand(3));
break;
case ONE:
- __ li(r3, Operand(3));
+ __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ li(r3, Operand(4));
break;
}
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(r6, r5);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5216,14 +5151,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(result, context, r0);
- __ LoadP(result, ContextOperand(result));
+ __ LoadP(result, ContextMemOperand(result));
__ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -5233,7 +5168,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5259,14 +5194,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(cell, context, r0);
- __ LoadP(cell, ContextOperand(cell));
+ __ LoadP(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@@ -5361,8 +5296,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
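
The "go up the context chain" loops in these two stubs hop outward through the PREVIOUS_INDEX link depth() times and then index a slot in the script context. Modeled with plain pointers (field names are placeholders):

#include <cassert>
#include <vector>

struct ScriptContext {
  ScriptContext* previous = nullptr;  // stands in for PREVIOUS_INDEX
  std::vector<void*> slots;           // property cells live here
};

void* LoadViaContext(ScriptContext* ctx, int depth, int slot) {
  for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // hop outward
  return ctx->slots[slot];                              // then index the slot
}

int main() {
  int cell = 42;
  ScriptContext script;            // the context that owns the slot
  script.slots = {nullptr, &cell};
  ScriptContext inner;             // two levels below it
  ScriptContext innermost;
  inner.previous = &script;
  innermost.previous = &inner;
  assert(LoadViaContext(&innermost, 2, 1) == &cell);
  return 0;
}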
@@ -5498,7 +5432,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index ef4bdce5d1..d394171d89 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -127,8 +127,8 @@ class RecordWriteStub : public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL, stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index b313d11bb3..2bf8b4ee83 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())
+byte* fast_exp_ppc_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)
->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = d1;
@@ -62,11 +62,11 @@ UnaryMathFunction CreateExpFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_ppc_machine_code = buffer;
return &fast_exp_simulator;
@@ -74,16 +74,17 @@ UnaryMathFunction CreateExpFunction() {
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// Called from C
__ function_descriptor();
@@ -99,9 +100,9 @@ UnaryMathFunction CreateSqrtFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
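
With the new signatures, the creators return nullptr instead of falling back to &std::exp or &std::sqrt themselves, so the fallback moves to the call site. A sketch of that caller-side pattern, under the assumption that a null result means "no generated stub available"; the stub function here is hypothetical and does not generate machine code.

#include <cmath>
#include <cstdio>

struct Isolate;  // opaque, only threaded through for the new signature
using UnaryMathFunctionWithIsolate = double (*)(double x, Isolate* isolate);

// Stand-in for CreateSqrtFunction: pretend code generation was unavailable.
UnaryMathFunctionWithIsolate CreateSqrtFunctionStub(Isolate*) {
  return nullptr;
}

double FastSqrt(double x, Isolate* isolate,
                UnaryMathFunctionWithIsolate generated) {
  // Use the generated stub when available, otherwise the C library.
  return generated != nullptr ? generated(x, isolate) : std::sqrt(x);
}

int main() {
  Isolate* isolate = nullptr;
  UnaryMathFunctionWithIsolate fn = CreateSqrtFunctionStub(isolate);
  std::printf("sqrt(2) = %f\n", FastSqrt(2.0, isolate, fn));
  return 0;
}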
@@ -607,15 +608,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r4);
patcher->masm()->addi(fp, sp,
@@ -664,7 +667,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// Don't use Call -- we need to preserve ip and lr.
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
index 7f19beea7d..c3cd9b39a0 100644
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -5,7 +5,7 @@
#ifndef V8_PPC_CODEGEN_PPC_H_
#define V8_PPC_CODEGEN_PPC_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 87a82719be..4c404ae911 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -275,24 +275,29 @@ enum OpcodeExt4 {
FMADD = 29 << 1, // Floating Multiply-Add
// Bits 10-1
- FCMPU = 0 << 1, // Floating Compare Unordered
- FRSP = 12 << 1, // Floating-Point Rounding
- FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
- FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
- FNEG = 40 << 1, // Floating Negate
- MCRFS = 64 << 1, // Move to Condition Register from FPSCR
- FMR = 72 << 1, // Floating Move Register
- MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
- FABS = 264 << 1, // Floating Absolute Value
- FRIN = 392 << 1, // Floating Round to Integer Nearest
- FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
- FRIP = 456 << 1, // Floating Round to Integer Plus
- FRIM = 488 << 1, // Floating Round to Integer Minus
- MFFS = 583 << 1, // move from FPSCR x-form
- MTFSF = 711 << 1, // move to FPSCR fields XFL-form
- FCFID = 846 << 1, // Floating convert from integer doubleword
- FCTID = 814 << 1, // Floating convert from integer doubleword
- FCTIDZ = 815 << 1 // Floating convert from integer doubleword
+ FCMPU = 0 << 1, // Floating Compare Unordered
+ FRSP = 12 << 1, // Floating-Point Rounding
+ FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
+ FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
+ MTFSB1 = 38 << 1, // Move to FPSCR Bit 1
+ FNEG = 40 << 1, // Floating Negate
+ MCRFS = 64 << 1, // Move to Condition Register from FPSCR
+ MTFSB0 = 70 << 1, // Move to FPSCR Bit 0
+ FMR = 72 << 1, // Floating Move Register
+ MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
+ FABS = 264 << 1, // Floating Absolute Value
+ FRIN = 392 << 1, // Floating Round to Integer Nearest
+ FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
+ FRIP = 456 << 1, // Floating Round to Integer Plus
+ FRIM = 488 << 1, // Floating Round to Integer Minus
+ MFFS = 583 << 1, // move from FPSCR x-form
+ MTFSF = 711 << 1, // move to FPSCR fields XFL-form
+ FCTID = 814 << 1, // Floating convert to integer doubleword
+ FCTIDZ = 815 << 1, // ^^^ with round toward zero
+ FCFID = 846 << 1, // Floating convert from integer doubleword
+ FCTIDU = 942 << 1, // Floating convert to integer doubleword unsigned
+ FCTIDUZ = 943 << 1, // ^^^ with round toward zero
+ FCFIDU = 974 << 1 // Floating convert from integer doubleword unsigned
};
enum OpcodeExt5 {
@@ -399,6 +404,13 @@ enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
#define CRWIDTH 4
+// These are the documented bit positions biased down by 32
+enum FPSCRBit {
+ VXSOFT = 21, // 53: Software-Defined Condition
+ VXSQRT = 22, // 54: Invalid Square Root
+ VXCVI = 23 // 55: Invalid Integer Convert
+};
+
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
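
The new FPSCRBit values are the documented (64-bit, MSB-first) bit numbers minus 32, since only the low word of the architected FPSCR is the real status/control register. A tiny sketch of that bias and of turning such a position into a register mask; the mask convention is the usual IBM numbering, shown here only for illustration.

#include <cassert>
#include <cstdint>

constexpr int kFpscrBias = 32;

// Documented (64-bit) bit number -> position within the 32-bit FPSCR.
constexpr int Biased(int documented_bit) { return documented_bit - kFpscrBias; }

// IBM numbering counts from the most-significant bit, so position p maps to
// the mask 0x80000000 >> p.
constexpr uint32_t MaskFor(int biased_bit) { return 0x80000000u >> biased_bit; }

int main() {
  constexpr int VXCVI = Biased(55);  // Invalid Integer Convert
  static_assert(VXCVI == 23, "matches the enum value above");
  assert(MaskFor(VXCVI) == 0x00000100u);
  return 0;
}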
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 831ccf6cdc..4232342b93 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -45,14 +45,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->bkpt(0);
}
}
@@ -75,7 +76,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 83fbc7e29c..d9450f8a42 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -889,6 +889,10 @@ void Decoder::DecodeExt3(Instruction* instr) {
Format(instr, "fcfids'. 'Dt, 'Db");
break;
}
+ case FCFIDU: {
+ Format(instr, "fcfidus'.'Dt, 'Db");
+ break;
+ }
default: {
Unknown(instr); // not used by V8
}
@@ -945,6 +949,10 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fcfid'. 'Dt, 'Db");
break;
}
+ case FCFIDU: {
+ Format(instr, "fcfidu'. 'Dt, 'Db");
+ break;
+ }
case FCTID: {
Format(instr, "fctid 'Dt, 'Db");
break;
@@ -953,6 +961,14 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fctidz 'Dt, 'Db");
break;
}
+ case FCTIDU: {
+ Format(instr, "fctidu 'Dt, 'Db");
+ break;
+ }
+ case FCTIDUZ: {
+ Format(instr, "fctiduz 'Dt, 'Db");
+ break;
+ }
case FCTIW: {
Format(instr, "fctiw'. 'Dt, 'Db");
break;
@@ -1001,6 +1017,18 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fneg'. 'Dt, 'Db");
break;
}
+ case MCRFS: {
+ Format(instr, "mcrfs ?,?");
+ break;
+ }
+ case MTFSB0: {
+ Format(instr, "mtfsb0'. ?");
+ break;
+ }
+ case MTFSB1: {
+ Format(instr, "mtfsb1'. ?");
+ break;
+ }
default: {
Unknown(instr); // not used by V8
}
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index b54845d4b3..b649f71ea3 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::parameter_count() { return r5; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
+
+
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -125,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r6, r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r6, r5, r4};
@@ -187,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
+ // r7 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r3, r4, r7, r5};
@@ -204,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : allocation site or undefined
+ Register registers[] = {r4, r6, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ Register registers[] = {r4, r6, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
@@ -341,6 +374,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // JSFunction
+ r6, // the new target
r3, // actual number of arguments
r5, // expected number of arguments
};
@@ -373,27 +407,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
- r7, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -409,7 +422,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
- r6, // original constructor
+ r6, // new target
r4, // constructor to call
r5 // address of the first argument
};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index e543ba853b..9cd35ab01c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -20,11 +20,12 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -49,8 +50,7 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY ||
- rmode == RelocInfo::CONSTRUCT_CALL);
+ DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
mov(ip, Operand(target, rmode));
mtctr(ip);
@@ -671,6 +671,20 @@ void MacroAssembler::ConvertInt64ToDouble(Register src,
}
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidus(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidu(double_dst, double_dst);
+}
+
+
void MacroAssembler::ConvertInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
@@ -701,6 +715,22 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
dst, double_dst);
}
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ if (rounding_mode == kRoundToZero) {
+ fctiduz(double_dst, double_input);
+ } else {
+ SetRoundingMode(rounding_mode);
+ fctidu(double_dst, double_input);
+ ResetRoundingMode();
+ }
+
+ MovDoubleToInt64(dst, double_dst);
+}
+#endif
+
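
At the value level, the new helpers wrap fcfidu (unsigned 64-bit integer to double) and fctidu/fctiduz (double to unsigned 64-bit integer, the latter rounding toward zero). A plain C++ view of those conversions; it ignores the saturation and FPSCR exception behavior of the real instructions and only shows the arithmetic.

#include <cassert>
#include <cstdint>

double ConvertUnsignedInt64ToDouble(uint64_t src) {
  return static_cast<double>(src);    // ~ fcfidu
}

uint64_t ConvertDoubleToUnsignedInt64(double src) {
  return static_cast<uint64_t>(src);  // ~ fctiduz (truncate toward zero)
}

int main() {
  uint64_t big = 0x8000000000000000ull;  // > INT64_MAX, needs the unsigned path
  double d = ConvertUnsignedInt64ToDouble(big);
  assert(d == 9223372036854775808.0);
  assert(ConvertDoubleToUnsignedInt64(2.9) == 2u);
  return 0;
}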
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -723,20 +753,26 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
}
-void MacroAssembler::StubPrologue(int prologue_offset) {
+void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ if (!base.is(no_reg)) {
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
+ } else {
+ LoadConstantPoolPointerRegister();
+ }
set_constant_pool_available(true);
}
}
-void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
+void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+ int prologue_offset) {
+ DCHECK(!base.is(no_reg));
{
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@@ -766,8 +802,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
set_constant_pool_available(true);
}
}
@@ -987,9 +1023,7 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1010,8 +1044,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// ARM has some sanity checks as per below, considering add them for PPC
// DCHECK(actual.is_immediate() || actual.reg().is(r3));
// DCHECK(expected.is_immediate() || expected.reg().is(r5));
- // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
- // || code_reg.is(r6));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1043,11 +1075,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r6, Operand(code_constant));
- addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
@@ -1064,17 +1091,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r7, Operand(step_in_enabled));
+ lbz(r7, MemOperand(r7));
+ cmpi(r7, Operand::Zero());
+ beq(&skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun, fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(r4));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
- &definitely_mismatches, flag, call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = ip;
+ LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
CallJSEntry(code);
@@ -1091,7 +1179,8 @@ void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
}
-void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
+ const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
@@ -1101,20 +1190,19 @@ void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
DCHECK(fun.is(r4));
Register expected_reg = r5;
- Register code_reg = ip;
+ Register temp_reg = r7;
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadWordArith(expected_reg,
FieldMemOperand(
- code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+ temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_PPC64)
SmiUntag(expected_reg);
#endif
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@@ -1132,11 +1220,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- InvokeCode(ip, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
}
@@ -1223,11 +1307,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- LoadP(scratch, FieldMemOperand(scratch, offset));
- LoadP(scratch,
- FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1417,11 +1497,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1440,26 +1516,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top), r0);
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1469,15 +1545,15 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1485,17 +1561,17 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if (is_int16(object_size)) {
cmpi(r0, Operand(object_size));
blt(gc_required);
- addi(scratch2, result, Operand(object_size));
+ addi(result_end, result, Operand(object_size));
} else {
- Cmpi(r0, Operand(object_size), scratch2);
+ Cmpi(r0, Operand(object_size), result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1505,28 +1581,24 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
- Register scratch1, Register scratch2,
+ Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, Operand(0x7091));
- li(scratch1, Operand(0x7191));
- li(scratch2, Operand(0x7291));
+ li(scratch, Operand(0x7191));
+ li(result_end, Operand(0x7291));
}
b(gc_required);
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
ExternalReference allocation_top =
@@ -1537,27 +1609,26 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1567,15 +1638,15 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1584,24 +1655,24 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
- cmp(r0, scratch2);
+ ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
+ cmp(r0, result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
} else {
cmp(r0, object_size);
blt(gc_required);
- add(scratch2, result, object_size);
+ add(result_end, result, object_size);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- andi(r0, scratch2, Operand(kObjectAlignmentMask));
+ andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
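
Both Allocate() variants still implement the same bump-pointer protocol; the rewrite only renames the registers (top_address, alloc_limit, result_end). A self-contained model of that protocol, with a byte buffer standing in for the new space:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct NewSpace {
  uintptr_t top;
  uintptr_t limit;
};

// Returns the address of the new object, or 0 to signal "gc_required".
uintptr_t Allocate(NewSpace* space, size_t object_size) {
  uintptr_t result = space->top;             // result <- current top
  uintptr_t result_end = result + object_size;
  if (result_end > space->limit) return 0;   // the blt(gc_required) case
  space->top = result_end;                   // StoreP(result_end, top_address)
  return result;
}

int main() {
  alignas(8) static uint8_t arena[64];
  NewSpace space{reinterpret_cast<uintptr_t>(arena),
                 reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t a = Allocate(&space, 32);
  uintptr_t b = Allocate(&space, 32);
  assert(a != 0 && b == a + 32);
  assert(Allocate(&space, 8) == 0);          // space exhausted -> GC
  return 0;
}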
@@ -1770,6 +1841,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg, Register key_reg, Register elements_reg,
Register scratch1, DoubleRegister double_scratch, Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@@ -2246,22 +2318,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r3, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ mov(r3, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
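
This is why the argument counts disappeared from all the TailCallRuntime call sites earlier in the diff: the count now comes from the runtime function table. A toy version of that lookup; the table contents just restate the numbers the old call sites passed and are not V8's actual table.

#include <cassert>

namespace Runtime {
enum FunctionId { kSubString, kToNumber, kStringCompare };

struct Function {
  FunctionId id;
  int nargs;        // a negative value would mean "variable argument count"
  int result_size;
};

const Function* FunctionForId(FunctionId id) {
  static const Function kTable[] = {
      {kSubString, 3, 1}, {kToNumber, 1, 1}, {kStringCompare, 2, 1}};
  return &kTable[id];
}
}  // namespace Runtime

int main() {
  const Runtime::Function* f = Runtime::FunctionForId(Runtime::kSubString);
  assert(f->nargs == 3 && f->result_size == 1);
  return 0;
}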
@@ -2277,35 +2340,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(ip, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(ip));
- CallJSEntry(ip);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(ip);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- LoadP(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- LoadP(target, ContextOperand(target, native_context_index), r0);
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(r4));
- GetBuiltinFunction(r4, native_context_index);
- // Load the code entry point from the builtins object.
- LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, r4);
+ InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
}
@@ -2427,44 +2465,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- LoadP(dst, GlobalObjectOperand());
- LoadP(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
- // Load the global or builtins object from the current context.
- LoadP(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(scratch,
- FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- LoadP(scratch,
- MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(ip, FieldMemOperand(scratch, offset));
+ LoadP(scratch, NativeContextMemOperand());
+ LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
bne(no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(map_in_out, FieldMemOperand(scratch, offset));
+ LoadP(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- LoadP(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- LoadP(function,
- FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ LoadP(dst, NativeContextMemOperand());
+ LoadP(dst, ContextMemOperand(dst, index));
}
@@ -2623,6 +2644,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2733,29 +2767,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- DCHECK((temps & ((1 << 15) - 1)) != 0);
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
- StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
+ StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -2866,25 +2896,25 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
}
-void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
+void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
Label loop;
mtctr(count);
bind(&loop);
- StoreP(filler, MemOperand(start_offset));
- addi(start_offset, start_offset, Operand(kPointerSize));
+ StoreP(filler, MemOperand(current_address));
+ addi(current_address, current_address, Operand(kPointerSize));
bdnz(&loop);
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label done;
- sub(r0, end_offset, start_offset, LeaveOE, SetRC);
+ sub(r0, end_address, current_address, LeaveOE, SetRC);
beq(&done, cr0);
ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
- InitializeNFieldsWithFiller(start_offset, r0, filler);
+ InitializeNFieldsWithFiller(current_address, r0, filler);
bind(&done);
}
@@ -3060,17 +3090,16 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ Register dest = function;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// AIX uses a function descriptor. When calling C code be aware
// of this descriptor and pick up values from it
LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
LoadP(ip, MemOperand(function, 0));
- Register dest = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ dest = ip;
+#elif ABI_CALL_VIA_IP
Move(ip, function);
- Register dest = ip;
-#else
- Register dest = function;
+ dest = ip;
#endif
Call(dest);
@@ -3172,8 +3201,8 @@ void MacroAssembler::CheckPageFlag(
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -3206,27 +3235,6 @@ void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- beq(&is_data_object);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
- andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(not_data_object, cr0);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg) {
DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
@@ -3243,117 +3251,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
}
-void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(r0, mask_scratch, load_scratch, SetRC);
- bne(&done, cr0);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- slwi(r0, mask_scratch, Operand(1));
- and_(r0, load_scratch, r0, SetRC);
- beq(&ok, cr0);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object, maybe_string_object, is_string_object, is_encoded;
-#if V8_TARGET_ARCH_PPC64
- Label length_computed;
-#endif
-
-
- // Check for heap-number
- LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- bne(&maybe_string_object);
- li(length, Operand(HeapNumber::kSize));
- b(&is_data_object);
- bind(&maybe_string_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(value_is_white_and_not_data, cr0);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- andi(r0, instance_type, Operand(kExternalStringTag));
- beq(&is_string_object, cr0);
- li(length, Operand(ExternalString::kSize));
- b(&is_data_object);
- bind(&is_string_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we untag the smi to get the length.
- // For UC16 (char-size of 2):
- // - (32-bit) we just leave the smi tag in place, thereby getting
- // the length multiplied by 2.
- // - (64-bit) we compute the offset in the 2-byte array
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
- andi(r0, instance_type, Operand(kStringEncodingMask));
- beq(&is_encoded, cr0);
- SmiUntag(ip);
-#if V8_TARGET_ARCH_PPC64
- b(&length_computed);
-#endif
- bind(&is_encoded);
-#if V8_TARGET_ARCH_PPC64
- SmiToShortArrayOffset(ip, ip);
- bind(&length_computed);
-#else
- DCHECK(kSmiShift == 1);
-#endif
- addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- li(r0, Operand(~kObjectAlignmentMask));
- and_(length, length, r0);
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orx(ip, ip, mask_scratch);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- mov(ip, Operand(~Page::kPageAlignmentMask));
- and_(bitmap_scratch, bitmap_scratch, ip);
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, length);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ beq(value_is_white, cr0);
}
@@ -4324,10 +4238,12 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8) {
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -4338,6 +4254,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
@@ -4345,11 +4263,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
@@ -4361,7 +4279,7 @@ CodePatcher::CodePatcher(byte* address, int instructions,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
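For context on the JumpIfBlack/JumpIfWhite changes in this file: the mark-bit patterns asserted above are white = "00", grey = "10" and black = "11", so testing only the first of the two mark bits is enough to separate white objects from live (grey or black) ones. A minimal standalone sketch of those predicates, with illustrative names rather than V8's Marking API:

#include <cassert>

// Two mark bits per object; the pattern strings in the DCHECKs list the
// first bit, then the second bit.
struct MarkBits {
  bool first;
  bool second;
};

constexpr MarkBits kWhite{false, false};  // "00"
constexpr MarkBits kGrey{true, false};    // "10"
constexpr MarkBits kBlack{true, true};    // "11"

// JumpIfWhite only needs the first bit: white is the only color with 0 there.
bool IsWhite(MarkBits m) { return !m.first; }

// JumpIfBlack checks for the full "11" pattern.
bool IsBlack(MarkBits m) { return m.first && m.second; }

int main() {
  assert(IsWhite(kWhite) && !IsWhite(kGrey) && !IsWhite(kBlack));
  assert(IsBlack(kBlack) && !IsBlack(kGrey) && !IsBlack(kWhite));
  return 0;
}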
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index d4660d9207..78de89aa5c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -24,6 +24,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
@@ -65,7 +66,8 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg);
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -109,11 +111,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -217,18 +216,10 @@ class MacroAssembler : public Assembler {
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
- Register scratch3, Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -387,8 +378,10 @@ class MacroAssembler : public Assembler {
const Register int_scratch);
#if V8_TARGET_ARCH_PPC64
- void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif
// Converts the double_input to an integer. Note that, upon return,
@@ -400,9 +393,18 @@ class MacroAssembler : public Assembler {
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
+#if V8_TARGET_ARCH_PPC64
+ // Converts the double_input to an unsigned integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
// Generates function and stub prologue code.
- void StubPrologue(int prologue_offset = 0);
- void Prologue(bool code_pre_aging, int prologue_offset = 0);
+ void StubPrologue(Register base = no_reg, int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -421,8 +423,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -434,7 +443,7 @@ class MacroAssembler : public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -542,14 +551,20 @@ class MacroAssembler : public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function, const ParameterCount& actual,
- InvokeFlag flag, const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -641,8 +656,8 @@ class MacroAssembler : public Assembler {
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
- void Allocate(Register object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
@@ -675,8 +690,11 @@ class MacroAssembler : public Assembler {
Register heap_number_map,
Label* gc_required);
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -684,17 +702,17 @@ class MacroAssembler : public Assembler {
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
- // |start_offset| are overwritten with the value in |filler|. At the end the
- // loop, |start_offset| points at the next uninitialized field. |count| is
- // assumed to be non-zero.
- void InitializeNFieldsWithFiller(Register start_offset, Register count,
+ // |current_address| are overwritten with the value in |filler|. At the end
+  // of the loop, |current_address| points at the next uninitialized field.
+ // |count| is assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+  // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -919,29 +937,29 @@ class MacroAssembler : public Assembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -990,13 +1008,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1317,6 +1328,10 @@ class MacroAssembler : public Assembler {
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1483,8 +1498,7 @@ class MacroAssembler : public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual, Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1531,7 +1545,8 @@ class CodePatcher {
public:
enum FlushICache { FLUSH, DONT_FLUSH };
- CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
+ FlushICache flush_cache = FLUSH);
~CodePatcher();
// Macro assembler to emit code.
@@ -1555,13 +1570,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
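The InitializeFieldsWithFiller / InitializeNFieldsWithFiller comments above describe a simple cursor loop: pointer-sized slots from |current_address| up to (but not including) |end_address| are overwritten with |filler|, and the cursor ends up equal to the end address. A standalone C++ sketch of that contract, with hypothetical names rather than the emitted PPC code:

#include <cassert>
#include <cstdint>

// Overwrites pointer-sized slots in [current_address, end_address) with
// `filler`; on return current_address == end_address, matching the comment.
void FillFieldsWithFiller(intptr_t*& current_address, intptr_t* end_address,
                          intptr_t filler) {
  while (current_address != end_address) {
    *current_address++ = filler;
  }
}

int main() {
  intptr_t fields[4] = {1, 2, 3, 4};
  intptr_t* cursor = fields;
  FillFieldsWithFiller(cursor, fields + 4, 0);
  assert(cursor == fields + 4);
  assert(fields[0] == 0 && fields[3] == 0);
  return 0;
}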
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index fa088a2c30..0efa6605d5 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -445,7 +445,7 @@ void PPCDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
intptr_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -844,12 +844,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -864,9 +864,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -874,7 +873,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -919,9 +918,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -2702,9 +2702,17 @@ void Simulator::ExecuteExt3(Instruction* instr) {
// fcfids
int frt = instr->RTValue();
int frb = instr->RBValue();
- double t_val = get_double_from_d_register(frb);
- int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
- double frt_val = static_cast<float>(*frb_val_p);
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCFIDU: {
+ // fcfidus
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2746,10 +2754,11 @@ void Simulator::ExecuteExt4(Instruction* instr) {
return;
}
case FSQRT: {
+ lazily_initialize_fast_sqrt(isolate_);
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val);
+ double frt_val = fast_sqrt(frb_val, isolate_);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2886,64 +2895,107 @@ void Simulator::ExecuteExt4(Instruction* instr) {
case FCFID: {
int frt = instr->RTValue();
int frb = instr->RBValue();
- double t_val = get_double_from_d_register(frb);
- int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
- double frt_val = static_cast<double>(*frb_val_p);
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
- case FCTID: {
+ case FCFIDU: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCTID:
+ case FCTIDZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIDZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
+ int64_t kMinVal = (one << 63);
+ int64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
+ switch (mode) {
case kRoundToZero:
- frt_val = (int64_t)frb_val;
+ frb_val = std::trunc(frb_val);
break;
case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
+ frb_val = std::ceil(frb_val);
break;
case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
+ frb_val = std::floor(frb_val);
break;
default:
- frt_val = (int64_t)frb_val;
UNIMPLEMENTED(); // Not used by V8.
break;
}
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (int64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
- case FCTIDZ: {
+ case FCTIDU:
+ case FCTIDUZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- int64_t frt_val;
- int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
-
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ int mode = (opcode == FCTIDUZ)
+ ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
+ uint64_t frt_val;
+ uint64_t kMinVal = 0;
+ uint64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
+
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- frt_val = (int64_t)frb_val;
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (uint64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
case FCTIW:
@@ -2951,44 +3003,47 @@ void Simulator::ExecuteExt4(Instruction* instr) {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIWZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
- if (frb_val > kMaxInt) {
- frt_val = kMaxInt;
- } else if (frb_val < kMinInt) {
- frt_val = kMinInt;
- } else {
- if (opcode == FCTIWZ) {
- frt_val = (int64_t)frb_val;
- } else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
- case kRoundToZero:
- frt_val = (int64_t)frb_val;
- break;
- case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
- break;
- case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
- break;
- case kRoundToNearest:
- frt_val = (int64_t)lround(frb_val);
-
- // Round to even if exactly halfway. (lround rounds up)
- if (std::fabs(static_cast<double>(frt_val) - frb_val) == 0.5 &&
- (frt_val % 2)) {
- frt_val += ((frt_val > 0) ? -1 : 1);
- }
+ int64_t kMinVal = kMinInt;
+ int64_t kMaxVal = kMaxInt;
- break;
- default:
- DCHECK(false);
- frt_val = (int64_t)frb_val;
- break;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ } else {
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ case kRoundToNearest: {
+ double orig = frb_val;
+ frb_val = lround(frb_val);
+ // Round to even if exactly halfway. (lround rounds up)
+ if (std::fabs(frb_val - orig) == 0.5 && ((int64_t)frb_val % 2)) {
+ frb_val += ((frb_val > 0) ? -1.0 : 1.0);
+ }
+ break;
}
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < kMinVal) {
+ frt_val = kMinVal;
+ } else if (frb_val > kMaxVal) {
+ frt_val = kMaxVal;
+ } else {
+ frt_val = (int64_t)frb_val;
}
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
return;
}
case FNEG: {
@@ -3041,6 +3096,43 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register(frt, lval);
return;
}
+ case MCRFS: {
+ int bf = instr->Bits(25, 23);
+ int bfa = instr->Bits(20, 18);
+ int cr_shift = (7 - bf) * CRWIDTH;
+ int fp_shift = (7 - bfa) * CRWIDTH;
+ int field_val = (fp_condition_reg_ >> fp_shift) & 0xf;
+ condition_reg_ &= ~(0x0f << cr_shift);
+ condition_reg_ |= (field_val << cr_shift);
+ // Clear copied exception bits
+ switch (bfa) {
+ case 5:
+ ClearFPSCR(VXSOFT);
+ ClearFPSCR(VXSQRT);
+ ClearFPSCR(VXCVI);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return;
+ }
+ case MTFSB0: {
+ int bt = instr->Bits(25, 21);
+ ClearFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
+ case MTFSB1: {
+ int bt = instr->Bits(25, 21);
+ SetFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
case FABS: {
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -3769,6 +3861,9 @@ void Simulator::CallInternal(byte* entry) {
set_pc(reinterpret_cast<intptr_t>(entry));
#endif
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
// the LR the simulation stops when returning to this call point.
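The reworked FCTIDU/FCTIDUZ case in this file rounds first and then saturates, flagging VXCVI for NaN and out-of-range inputs. A standalone sketch of the same round-toward-zero variant, with the simulator's register plumbing left out and illustrative names:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Truncate toward zero, then clamp: NaN and values below 0 go to 0, values at
// or above 2^64 go to UINT64_MAX; `invalid` mirrors the VXCVI condition.
uint64_t DoubleToUint64Z(double value, bool* invalid) {
  const uint64_t kMinVal = 0;
  const uint64_t kMaxVal = ~static_cast<uint64_t>(0);
  *invalid = false;
  if (std::isnan(value)) {
    *invalid = true;
    return kMinVal;
  }
  value = std::trunc(value);  // FCTIDUZ forces round-toward-zero
  if (value < static_cast<double>(kMinVal)) {
    *invalid = true;
    return kMinVal;
  }
  if (value >= static_cast<double>(kMaxVal)) {
    *invalid = true;
    return kMaxVal;
  }
  return static_cast<uint64_t>(value);
}

int main() {
  bool invalid;
  printf("%llu %d\n", static_cast<unsigned long long>(DoubleToUint64Z(3.9, &invalid)), invalid);   // 3 0
  printf("%llu %d\n", static_cast<unsigned long long>(DoubleToUint64Z(-1.5, &invalid)), invalid);  // 0 1
  printf("%llu %d\n", static_cast<unsigned long long>(DoubleToUint64Z(1e30, &invalid)), invalid);  // max 1
  return 0;
}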
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index bdf50ba474..a3b03dc506 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
@@ -33,8 +33,9 @@ typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
// should act as a function matching the type ppc_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// The stack limit beyond which we will throw stack overflow errors in
@@ -48,11 +49,15 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() {}
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
} // namespace internal
} // namespace v8
@@ -318,6 +323,9 @@ class Simulator {
#endif
void ExecuteGeneric(Instruction* instr);
+ void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
+ void ClearFPSCR(int bit) { fp_condition_reg_ &= ~(1 << (31 - bit)); }
+
// Executes one instruction.
void ExecuteInstruction(Instruction* instr);
@@ -329,7 +337,8 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
- void* external_function, v8::internal::ExternalReference::Type type);
+ Isolate* isolate, void* external_function,
+ v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
@@ -391,16 +400,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
(intptr_t)p3, (intptr_t)p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, \
- (intptr_t)p7, (intptr_t)NULL, (intptr_t)p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
+ (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
+ (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
+ (intptr_t)NULL, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -414,13 +424,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
} // namespace internal
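The SetFPSCR/ClearFPSCR helpers added above follow PowerPC's convention of numbering register bits from the most significant bit (bit 0) down to the least significant (bit 31), hence the (31 - bit) shift. A quick standalone check of that mapping on a local 32-bit value:

#include <cassert>
#include <cstdint>

// Architectural bit `bit` (0 = MSB ... 31 = LSB) maps to host bit (31 - bit).
void SetBit(uint32_t& reg, int bit) { reg |= (1u << (31 - bit)); }
void ClearBit(uint32_t& reg, int bit) { reg &= ~(1u << (31 - bit)); }

int main() {
  uint32_t fpscr = 0;
  SetBit(fpscr, 0);   // architectural bit 0 is the MSB
  assert(fpscr == 0x80000000u);
  SetBit(fpscr, 31);  // architectural bit 31 is the LSB
  assert(fpscr == 0x80000001u);
  ClearBit(fpscr, 0);
  assert(fpscr == 0x00000001u);
  return 0;
}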