author    Ali Ijaz Sheikh <ofrobots@google.com>  2015-11-30 21:22:40 -0800
committer Ali Ijaz Sheikh <ofrobots@google.com>  2015-12-04 00:06:01 -0800
commit    8a43a3d7619fde59f0d1f2fad05d8ae7d1732b02 (patch)
tree      8698af91526d0eac90840dcba1e5b565160105c4 /deps/v8/src/ppc
parent    8a2acd4cc9807510786b4b6f7ad3a947aeb3a14c (diff)
deps: upgrade V8 to 4.7.80.24
Pick up the latest branch head for V8 4.7:
https://github.com/v8/v8/commit/be169f8df059040e6a53ec1dd4579d8bca2167b5

Full change history for the 4.7 branch:
https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.7

V8 blog post about what is new on V8 4.7:
http://v8project.blogspot.de/2015/10/v8-release-47.html

PR-URL: https://github.com/nodejs/node/pull/4106
Reviewed-By: bnoordhuis - Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: targos - Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: rvagg - Rod Vagg <rod@vagg.org>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h          |    4
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc             |    2
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc              |  916
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc            | 1224
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h             |    5
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc               |    8
-rw-r--r--  deps/v8/src/ppc/cpu-ppc.cc                   |   18
-rw-r--r--  deps/v8/src/ppc/frames-ppc.cc                |    1
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc |   48
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.cc       |  330
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.h        |   10
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.cc               |   57
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.h                |   79
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc       |  383
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h        |   80
15 files changed, 1325 insertions, 1840 deletions
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 35968fc682..b1e2825751 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -698,7 +698,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, 5 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -713,7 +713,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, 2 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 542968d8e7..6bbb53c4ba 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -34,6 +34,8 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
+#include "src/ppc/assembler-ppc.h"
+
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 6ecfcea2f6..e08c865e4e 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -23,12 +23,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// -- r3 : number of arguments excluding receiver
// -- r4 : called function (only guaranteed when
// extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument (argc == r0)
// -- sp[4 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(r4);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -132,7 +139,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -140,122 +148,132 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6);
-
- Register function = r4;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5);
- __ cmp(function, r5);
- __ Assert(eq, kUnexpectedStringFunction);
- }
- // Load the first arguments in r3 and get rid of the rest.
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
Label no_arguments;
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- // First args = sp[(argc - 1) * 4].
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ add(sp, sp, r3);
- __ LoadP(r3, MemOperand(sp));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = r5;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(r3, // Input.
- argument, // Result.
- r6, // Scratch.
- r7, // Scratch.
- r8, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7);
- __ bind(&argument_is_string);
+ {
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ }
- // ----------- S t a t e -------------
- // -- r5 : argument converted to string
- // -- r4 : constructor function
- // -- lr : return address
- // -----------------------------------
+ // 2a. At least one argument, return r3 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(r3, &to_string);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ __ bgt(&to_string);
+ __ beq(&symbol_descriptive_string);
+ __ Ret();
+ }
- Label gc_required;
- __ Allocate(JSValue::kSize,
- r3, // Result.
- r6, // Scratch.
- r7, // Scratch.
- &gc_required, TAG_OBJECT);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ Ret(1);
+ }
- // Initialising the String Object.
- Register map = r6;
- __ LoadGlobalFunctionInitialMap(function, map, r7);
- if (FLAG_debug_code) {
- __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
- __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmpi(r7, Operand::Zero());
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 3a. Convert r3 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
- __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ // 3b. Convert symbol in r3 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(r3);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
- __ Ret();
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r3, &convert_argument);
-
- // Is it a String?
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ andi(r0, r6, Operand(kIsNotStringMask));
- __ bne(&convert_argument, cr0);
- __ mr(argument, r3);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r5.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+ // 2. Make sure r3 is a string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(r3);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label convert, done_convert;
+ __ JumpIfSmi(r3, &convert);
+ __ CompareObjectType(r3, r5, r5, FIRST_NONSTRING_TYPE);
+ __ blt(&done_convert);
+ __ bind(&convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ push(r4);
+ __ CallStub(&stub);
+ __ pop(r4);
+ }
+ __ bind(&done_convert);
}
- __ pop(function);
- __ mr(argument, r3);
- __ b(&argument_is_string);
- // Load the empty string into r5, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- r3 : the first argument
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ mr(r5, r3);
+ __ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in r3.
+ __ LoadGlobalFunctionInitialMap(r4, r6, r7);
+ __ StoreP(r6, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ LoadSmiLiteral(r6, Smi::FromInt(JSValue::kSize));
+ __ Push(r4, r5, r6);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(r4, r5);
+ }
+ __ b(&done_allocate);
}
- __ Ret();
}
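
For orientation, the observable behaviour of the two rewritten String builtins above corresponds roughly to the sketch below. The Value and JSValueWrapper types and the ToStringOrThrow helper are hypothetical stand-ins for this sketch only, not V8 API.

#include <stdexcept>
#include <string>

// Hypothetical tagged value, only for this sketch.
struct Value {
  enum Kind { kString, kSymbol, kNumber } kind;
  std::string str;   // string contents or symbol description
  double num = 0;
};

// ToString as used by the construct stub: throws for symbols.
std::string ToStringOrThrow(const Value& v) {
  if (v.kind == Value::kSymbol)
    throw std::runtime_error("TypeError: Cannot convert a Symbol to a string");
  return v.kind == Value::kString ? v.str : std::to_string(v.num);
}

// String(x) called as a function (Generate_StringConstructor).
std::string StringAsCall(const Value* arg /* nullptr = no arguments */) {
  if (arg == nullptr) return "";                     // 2b: empty string
  if (arg->kind == Value::kString) return arg->str;  // 2a: returned as-is
  if (arg->kind == Value::kSymbol)                   // 3b: descriptive string
    return "Symbol(" + arg->str + ")";
  return ToStringOrThrow(*arg);                      // 3a: generic ToString
}

// new String(x) (Generate_StringConstructor_ConstructStub): the converted
// string is additionally boxed in a fresh JSValue-like wrapper.
struct JSValueWrapper { std::string value; };

JSValueWrapper StringAsConstruct(const Value* arg) {
  return JSValueWrapper{arg ? ToStringOrThrow(*arg) : ""};
}
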
@@ -306,8 +324,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -317,9 +334,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
Isolate* isolate = masm->isolate();
// Enter a construct frame.
@@ -391,9 +405,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: initial map
Label rt_call_reload_new_target;
__ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ addi(r6, r6, Operand(AllocationMemento::kSize / kPointerSize));
- }
__ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
@@ -401,7 +412,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r4: constructor function
// r5: initial map
- // r6: object size (including memento if create_memento)
+ // r6: object size
// r7: JSObject (not tagged)
__ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
__ mr(r8, r7);
@@ -416,7 +427,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r4: constructor function
// r5: initial map
- // r6: object size (in words, including memento if create_memento)
+ // r6: object size
// r7: JSObject (not tagged)
// r8: First in-object property of JSObject (not tagged)
// r9: End of object
@@ -458,24 +469,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ subi(r3, r9, Operand(AllocationMemento::kSize));
- __ InitializeFieldsWithFiller(r8, r3, r10);
-
- // Fill in memento fields.
- // r8: points to the allocated but uninitialized memento.
- __ LoadRoot(r10, Heap::kAllocationMementoMapRootIndex);
- __ StoreP(r10, MemOperand(r8, AllocationMemento::kMapOffset));
- // Load the AllocationSite
- __ LoadP(r10, MemOperand(sp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(r10, r3);
- __ StoreP(r10,
- MemOperand(r8, AllocationMemento::kAllocationSiteOffset));
- __ addi(r8, r8, Operand(AllocationMemento::kAllocationSiteOffset +
- kPointerSize));
- } else {
- __ InitializeFieldsWithFiller(r8, r9, r10);
- }
+ __ InitializeFieldsWithFiller(r8, r9, r10);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -494,44 +488,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: constructor function
// r6: original constructor
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
- __ Push(r5, r4, r6);
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ Push(r4, r6);
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ Push(r4, r6);
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mr(r7, r3);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ b(&count_incremented);
- }
-
// Receiver for constructor call allocated.
// r7: JSObject
__ bind(&allocated);
- if (create_memento) {
- __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, r8);
- __ beq(&count_incremented);
- // r5 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ LoadP(
- r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
- __ StoreP(
- r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset),
- r0);
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(r4, ip);
@@ -633,12 +597,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -732,8 +696,7 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers r5; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -754,12 +717,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ bgt(&okay); // Signed comparison.
// Out of stack space.
- __ LoadP(r4, MemOperand(fp, calleeOffset));
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(r4, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
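
A minimal sketch of the check this helper emits, assuming a plain exception in place of the Runtime::kThrowStackOverflow call:

#include <cstddef>
#include <cstdint>
#include <stdexcept>

void CheckStackOverflow(uintptr_t sp, uintptr_t real_stack_limit, size_t argc) {
  // Headroom between sp and the real stack limit; it may already be negative
  // if the stack has overflowed, hence the signed comparison (bgt in the asm).
  intptr_t headroom = static_cast<intptr_t>(sp - real_stack_limit);
  if (headroom <= static_cast<intptr_t>(argc * sizeof(void*))) {
    // Was Builtins::STACK_OVERFLOW before this patch; now a runtime call.
    throw std::runtime_error("RangeError: Maximum call stack size exceeded");
  }
}
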
@@ -768,7 +726,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r3: code entry
+ // r3: new.target
// r4: function
// r5: receiver
// r6: argc
@@ -783,22 +741,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(cp, Operand(context_address));
+ __ LoadP(cp, MemOperand(cp));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
- __ push(r4);
- __ push(r5);
+ __ Push(r4, r5);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers r5.
- Generate_CheckStackOverflow(masm, kFunctionOffset, r6, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, r6, kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop.
// r4: function
@@ -818,6 +774,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmp(r7, r5);
__ bne(&loop);
+ // Setup new.target and argc.
+ __ mr(r7, r3);
+ __ mr(r3, r6);
+ __ mr(r6, r7);
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
@@ -826,17 +787,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mr(r16, r7);
__ mr(r17, r7);
- // Invoke the code and pass argc as r3.
- __ mr(r3, r6);
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
// Exit the JS frame and remove the parameters (except function), and
// return.
}
@@ -908,7 +864,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ cmpl(r6, r0);
__ bge(&ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -991,8 +947,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+
+ // Drop receiver + arguments and return.
+ __ lwz(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ add(sp, sp, r0);
__ blr();
}
@@ -1260,6 +1219,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r3: actual number of arguments
@@ -1267,201 +1227,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Label done;
__ cmpi(r3, Operand::Zero());
__ bne(&done);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ addi(r3, r3, Operand(1));
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- // r3: actual number of arguments
- Label slow, non_function;
- __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
- __ add(r4, sp, r4);
- __ LoadP(r4, MemOperand(r4));
- __ JumpIfSmi(r4, &non_function);
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r3: actual number of arguments
- // r4: function
- Label shift_arguments;
- __ li(r7, Operand::Zero()); // indicate regular JS_FUNCTION
- {
- Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&shift_arguments, cr0);
-
- // Do not transform the receiver for native (Compilerhints already in r6).
- __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(&shift_arguments, cr0);
-
- // Compute the receiver in sloppy mode.
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
- __ LoadP(r5, MemOperand(r5, -kPointerSize));
- // r3: actual number of arguments
- // r4: function
- // r5: first argument
- __ JumpIfSmi(r5, &convert_to_object);
-
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, r6);
- __ beq(&use_global_proxy);
- __ LoadRoot(r6, Heap::kNullValueRootIndex);
- __ cmp(r5, r6);
- __ beq(&use_global_proxy);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&shift_arguments);
-
- __ bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r3);
- __ mr(r3, r5);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mr(r5, r3);
-
- __ pop(r3);
- __ SmiUntag(r3);
-
- // Exit the internal frame.
- }
-
- // Restore the function to r4, and the flag to r7.
- __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
- __ add(r7, sp, r7);
- __ LoadP(r4, MemOperand(r7));
- __ li(r7, Operand::Zero());
- __ b(&patch_receiver);
-
- __ bind(&use_global_proxy);
- __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r6, sp, ip);
- __ StoreP(r5, MemOperand(r6, -kPointerSize));
-
- __ b(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(r7, Operand(1, RelocInfo::NONE32)); // indicate function proxy
- __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE));
- __ beq(&shift_arguments);
- __ bind(&non_function);
- __ li(r7, Operand(2, RelocInfo::NONE32)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
+ // 2. Get the callable to call (passed as receiver) from the stack.
// r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
- __ StoreP(r4, MemOperand(r5, -kPointerSize));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r4, MemOperand(sp, r5));
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
+ // r4: callable
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
+ __ add(r5, sp, r5);
+
+ __ mtctr(r3);
__ bind(&loop);
__ LoadP(ip, MemOperand(r5, -kPointerSize));
__ StoreP(ip, MemOperand(r5));
__ subi(r5, r5, Operand(kPointerSize));
- __ cmp(r5, sp);
- __ bne(&loop);
+ __ bdnz(&loop);
// Adjust the actual number of arguments and remove the top element
// (which is a copy of the last argument).
__ subi(r3, r3, Operand(1));
__ pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- {
- Label function, non_proxy;
- __ cmpi(r7, Operand::Zero());
- __ beq(&function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ li(r5, Operand::Zero());
- __ cmpi(r7, Operand(1));
- __ bne(&non_proxy);
-
- __ push(r4); // re-add proxy object as additional argument
- __ addi(r3, r3, Operand(1));
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register ip without checking arguments.
- // r3: actual number of arguments
- // r4: function
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r5, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
- __ cmp(r5, r3); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET, ne);
-
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(ip, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
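
The argument shuffle above (now driven by mtctr/bdnz instead of an explicit compare) can be pictured on a plain vector that models the stack; ExtractCallableAndShift is a hypothetical name used only for this sketch.

#include <vector>

// stack[0] is the value at sp (the last argument), stack[argc] is the receiver
// slot, which for this builtin holds the callable itself.
template <typename T>
T ExtractCallableAndShift(std::vector<T>& stack, int& argc, T undefined) {
  if (argc == 0) {                        // 1. ensure at least one argument
    stack.insert(stack.begin(), undefined);
    argc++;
  }
  T callable = stack[argc];               // 2. callable arrives in the receiver slot
  for (int i = argc; i > 0; --i)          // 3. shift everything one slot down,
    stack[i] = stack[i - 1];              //    so old arg[0] becomes the receiver
  stack.erase(stack.begin());             //    drop the duplicated top element
  argc--;
  return callable;                        // 4. then tail-call Call(callable)
}
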
@@ -1530,114 +1330,32 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r4);
__ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r3);
+ __ LoadP(r4, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ Push(r3, r4);
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ li(r4, Operand::Zero());
- __ Push(r3, r4); // limit and initial index.
-
- // Get the receiver.
- __ LoadP(r3, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r4.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&push_receiver, cr0);
-
- // Do not transform the receiver for strict mode functions.
- __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(&push_receiver, cr0);
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(r3, &call_to_object);
- __ LoadRoot(r4, Heap::kNullValueRootIndex);
- __ cmp(r3, r4);
- __ beq(&use_global_proxy);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, r4);
- __ beq(&use_global_proxy);
-
- // Check if the receiver is already a JavaScript object.
- // r3: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&push_receiver);
-
- // Convert the receiver to a regular object.
- // r3: receiver
- __ bind(&call_to_object);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ b(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ LoadP(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- // r3: receiver
- __ bind(&push_receiver);
- __ push(r3);
+ __ LoadP(r5, MemOperand(fp, kReceiverOffset));
+ __ Push(r3, r4, r5); // limit, initial index and receiver.
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(r3);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&call_proxy);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
-
- __ LeaveFrame(StackFrame::INTERNAL, kStackSize * kPointerSize);
- __ blr();
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(r4); // add function proxy as last argument
- __ addi(r3, r3, Operand(1));
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
@@ -1680,9 +1398,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(r3);
__ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(r3);
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
@@ -1779,6 +1498,253 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(r4);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ lbz(r6, FieldMemOperand(r5, SharedFunctionInfo::kNativeByteOffset));
+ __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ bne(&done_convert, cr0);
+ {
+ __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r6, MemOperand(sp, r6));
+
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -- r5 : the shared function info.
+ // -- r6 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(r6, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r6);
+ }
+ __ b(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r3, r4);
+ __ mr(r3, r6);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r6, r3);
+ __ Pop(r3, r4);
+ __ SmiUntag(r3);
+ }
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r6, MemOperand(sp, r7));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -- r5 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ LoadWordArith(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(r5);
+#endif
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(r3);
+ ParameterCount expected(r5);
+ __ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
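The receiver fix-up above only runs for sloppy-mode, non-native functions, which is why the stub tests the two SharedFunctionInfo bits with a single byte load. A pseudo-C++ sketch of the conversion rules, with GlobalProxy and ToObject as stand-ins for the generated code paths:

enum class Kind { kUndefined, kNull, kPrimitive, kJSReceiver };
struct Val { Kind kind; };

Val GlobalProxy() { return Val{Kind::kJSReceiver}; }  // stand-in only
Val ToObject(Val) { return Val{Kind::kJSReceiver}; }  // stand-in only

Val ConvertReceiver(Val receiver, bool is_strict, bool is_native) {
  if (is_strict || is_native) return receiver;            // used unchanged
  if (receiver.kind == Kind::kJSReceiver) return receiver; // already an object
  if (receiver.kind == Kind::kUndefined || receiver.kind == Kind::kNull)
    return GlobalProxy();                                  // patch to global proxy
  return ToObject(receiver);                               // box other primitives
}
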
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(r4, &non_callable);
+ __ bind(&non_smi);
+ __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq);
+ __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ bne(&non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(r4);
+ __ b(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&non_callable, cr0);
+  // Overwrite the original receiver with the (original) target.
+ __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r8));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
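The type dispatch in Generate_Call boils down to the decision tree below; the enum and return strings merely name the targets used above, and Generate_Construct further down follows the same pattern using Map::kIsConstructor and the Construct* builtins.

#include <string>

enum class Target { kSmi, kJSFunction, kJSFunctionProxy, kCallableObject, kOther };

std::string DispatchCall(Target t) {
  switch (t) {
    case Target::kJSFunction:      return "CallFunction builtin";
    case Target::kJSFunctionProxy: return "its call trap, re-dispatched here";
    case Target::kCallableObject:  return "CallAsFunctionDelegate (Map::kIsCallable set)";
    default:                       return "Runtime::kThrowCalledNonCallable";
  }
}
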
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (checked to be a JSFunction)
+ // -- r6 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(r4);
+ __ AssertFunction(r6);
+
+ // Calling convention for function specific ConstructStubs require
+ // r5 to contain either an AllocationSite or undefined.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- r6 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (can be any Object)
+ // -- r6 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(r4, &non_constructor);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r5, Map::kIsConstructor, r0);
+ __ beq(&non_constructor, cr0);
+
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r8));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r5 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r4 : the target to call (can be any Object).
+
+ // Calculate number of arguments (add one for receiver).
+ __ addi(r6, r3, Operand(1));
+
+ // Push the arguments.
+ Label loop;
+ __ addi(r5, r5, Operand(kPointerSize)); // Bias up for LoadPU
+ __ mtctr(r6);
+ __ bind(&loop);
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ push(r6);
+ __ bdnz(&loop);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
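At a high level the interpreter-facing helper above just copies receiver plus arguments from a buffer onto the stack and tail-calls the generic Call builtin; a sketch that glosses over the PPC-specific pre-decrement (LoadPU) addressing:

#include <vector>

template <typename T>
void PushArgsAndCall(std::vector<T>& js_stack, const T* first_arg, int argc,
                     void (*call_builtin)(std::vector<T>&, int /* argc */)) {
  // ctr is loaded with argc + 1 so the loop also pushes the receiver; the
  // exact traversal direction is an addressing detail omitted here.
  for (int i = 0; i < argc + 1; ++i) js_stack.push_back(first_arg[i]);
  call_builtin(js_stack, argc);  // jump to the generic Call builtin
}
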
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -1801,7 +1767,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into r3 and copy end address into r5.
+ // Calculate copy start address into r3 and copy end address into r6.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
@@ -1810,20 +1776,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ add(r3, r3, fp);
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
- __ sub(r5, r3, r5);
+ __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
+ __ sub(r6, r3, r6);
// Copy the arguments (including the receiver) to the new stack frame.
// r3: copy start address
// r4: function
- // r5: copy end address
+ // r5: expected number of arguments
+ // r6: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
- __ cmp(r3, r5); // Compare before moving to next argument.
+ __ cmp(r3, r6); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kPointerSize));
__ bne(&copy);
@@ -1893,21 +1860,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
- __ sub(r5, fp, r5);
+ __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
+ __ sub(r6, fp, r6);
// Adjust for frame.
- __ subi(r5, r5, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ subi(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
- __ cmp(sp, r5);
+ __ cmp(sp, r6);
__ bne(&fill);
}
// Call the entry point.
__ bind(&invoke);
+ __ mr(r3, r5);
+ // r3 : expected number of arguments
+ // r4 : function (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
@@ -1928,7 +1898,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bkpt(0);
}
}
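
The adaptor's two paths (enough arguments / too few arguments) amount to the following, modelled on plain vectors with a hypothetical undefined value:

#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> AdaptArguments(const std::vector<T>& actual, size_t expected,
                              T undefined) {
  // "Enough": copy only the expected number of arguments into the new frame
  // (extra actual arguments stay behind in the adaptor frame).
  std::vector<T> adapted(actual.begin(),
                         actual.begin() + std::min(actual.size(), expected));
  // "Too few": pad the missing slots with undefined.
  while (adapted.size() < expected) adapted.push_back(undefined);
  return adapted;  // r3 then carries `expected` into the callee (__ mr(r3, r5))
}
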
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 435ac47c00..290159a3e7 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -12,6 +12,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/ppc/code-stubs-ppc.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -706,29 +707,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
- __ push(r3);
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
}
+ __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+ __ push(r3);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
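
The ncr ("NaN compare result") picked above exists so that every relational comparison involving NaN evaluates to false; a sketch of the contract the runtime compare call is expected to satisfy:

#include <cmath>

// Returns -1 (less), 0 (equal) or +1 (greater); nan_result is returned when
// either operand is NaN. The stub passes GREATER for < / <= and LESS for > / >=.
int Compare(double lhs, double rhs, int nan_result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return nan_result;
  if (lhs < rhs) return -1;
  if (lhs > rhs) return +1;
  return 0;
}

// lhs <  rhs  is lowered to  Compare(lhs, rhs, +1) <  0   -> false for NaN
// lhs >= rhs  is lowered to  Compare(lhs, rhs, -1) >= 0   -> false for NaN
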
@@ -1366,216 +1363,115 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-// Uses registers r3 to r7.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r3 or at sp + 1 * kPointerSize.
-// * function: r4 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed in r8.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r3; // Object (lhs).
- Register map = r6; // Map of the object.
- const Register function = r4; // Function (rhs).
- const Register prototype = r7; // Prototype of the function.
- // The map_check_delta was stored in r8
- // The bool_load_delta was stored in r9
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register map_check_delta = r8;
- const Register bool_load_delta = r9;
- const Register inline_site = r10;
- const Register scratch = r5;
- Register scratch3 = no_reg;
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ bne(&miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ bne(&miss);
- __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = r4; // Object (lhs).
+ Register const function = r3; // Function (rhs).
+ Register const object_map = r5; // Map of {object}.
+ Register const function_map = r6; // Map of {function}.
+ Register const function_prototype = r7; // Prototype of {function}.
+ Register const scratch = r8;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ bne(&fast_case);
+ __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+ __ bne(&fast_case);
+ __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ Ret();
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- const Register offset = map_check_delta;
- __ mflr(inline_site);
- __ sub(inline_site, inline_site, offset);
- // Get the map location in offset and patch it.
- __ GetRelocatedValue(inline_site, offset, scratch);
- __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
-
- __ mr(r11, map);
- __ RecordWriteField(offset, Cell::kValueOffset, r11, function,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
- // Register mapping: r6 is object map and r7 is function prototype.
- // Get prototype of object into r5.
- __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
+ __ bne(&slow_case, cr0);
- // We don't need map any more. Use it as a scratch register.
- scratch3 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ LoadP(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(scratch, FieldMemOperand(shared_info,
+ SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kBoundFunction,
+#else
+ SharedFunctionInfo::kBoundFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&slow_case, cr0);
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ LoadP(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
+ __ bne(&function_prototype_valid);
+ __ LoadP(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmp(scratch, prototype);
- __ beq(&is_instance);
- __ cmp(scratch, scratch3);
- __ beq(&is_not_instance);
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ b(&loop);
- Factory* factory = isolate()->factory();
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->true_value());
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, bool_load_delta);
- // Get the boolean result location in scratch and patch it.
- __ SetRelocatedValue(inline_site, scratch, r3);
-
- if (!ReturnTrueFalseObject()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, bool_load_delta);
- // Get the boolean result location in scratch and patch it.
- __ SetRelocatedValue(inline_site, scratch, r3);
-
- if (!ReturnTrueFalseObject()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow);
-
- // Null is not instance of anything.
- __ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
- __ bne(&object_not_null);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r3, r4);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r4);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ cmpi(r3, Operand::Zero());
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ isel(eq, r3, r3, r4);
- } else {
- Label true_value, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&true_value);
-
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_value);
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, function_prototype);
+ __ beq(&done);
+ __ cmp(object_prototype, null);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ bne(&loop);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- __ bind(&done);
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
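For orientation, the new fast path above walks the receiver's prototype chain purely through map loads and then caches the answer in the instanceof cache roots before returning. A minimal C++ sketch of the same walk, using hypothetical stand-in types rather than V8's real heap layout:

struct Object;
struct Map { Object* prototype; };   // stands in for Map::kPrototypeOffset
struct Object { Map* map; };         // stands in for HeapObject::kMapOffset

// nullptr plays the role of the null value that terminates the chain.
bool IsInPrototypeChain(Object* object, Object* function_prototype) {
  Map* map = object->map;
  for (;;) {
    Object* prototype = map->prototype;                 // LoadP Map::kPrototypeOffset
    if (prototype == function_prototype) return true;   // matches: answer is true
    if (prototype == nullptr) return false;             // hit null: answer is false
    map = prototype->map;                               // LoadP HeapObject::kMapOffset, loop
  }
}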
@@ -1685,74 +1581,75 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[1] : receiver displacement
- // sp[2] : function
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&runtime);
// Patch the arguments.length and the parameters pointer in the current frame.
- __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));
- __ SmiToPtrArrayOffset(r5, r5);
- __ add(r6, r6, r5);
+ __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
+ __ Push(r4, r6, r5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[1] : address of receiver argument
- // sp[2] : function
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
// Registers used over whole function:
- // r9 : allocated object (tagged)
- // r11 : mapped parameter count (tagged)
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
- __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
- // r4 = parameter count (tagged)
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
// No adaptor, parameter count = argument count.
- __ mr(r5, r4);
+ __ mr(r8, r5);
+ __ mr(r9, r5);
__ b(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r7, r5);
+ __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r8);
__ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
- // r4 = parameter count (tagged)
- // r5 = argument count (tagged)
- // Compute the mapped parameter count = min(r4, r5) in r4.
- __ cmp(r4, r5);
+ // r8 = argument count (tagged)
+ // r9 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r5, r8) in r9.
+ __ cmp(r5, r8);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r4, r4, r5);
+ __ isel(lt, r9, r5, r8);
} else {
Label skip;
+ __ mr(r9, r5);
__ blt(&skip);
- __ mr(r4, r5);
+ __ mr(r9, r8);
__ bind(&skip);
}
@@ -1763,9 +1660,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ SmiToPtrArrayOffset(r11, r4);
+ __ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
__ isel(eq, r11, r0, r11);
} else {
@@ -1774,13 +1671,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ li(r11, Operand::Zero());
__ b(&skip3);
__ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r4);
+ __ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
__ bind(&skip3);
}
// 2. Backing store.
- __ SmiToPtrArrayOffset(r7, r5);
+ __ SmiToPtrArrayOffset(r7, r8);
__ add(r11, r11, r7);
__ addi(r11, r11, Operand(FixedArray::kHeaderSize));
@@ -1788,7 +1685,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -1801,7 +1698,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
- __ cmpi(r4, Operand::Zero());
+ __ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
__ LoadP(r7, MemOperand(r7, kAliasedOffset));
@@ -1817,28 +1714,27 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
// r3 = address of new object (tagged)
- // r4 = mapped parameter count (tagged)
// r5 = argument count (smi-tagged)
// r7 = address of arguments map (tagged)
+ // r9 = mapped parameter count (tagged)
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ LoadP(r6, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(r6);
+ __ AssertNotSmi(r4);
const int kCalleeOffset =
JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
- __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
// Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r5);
+ __ AssertSmi(r8);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset =
JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
- __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r7 will point there, otherwise
@@ -1847,35 +1743,35 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// r3 = address of new object (tagged)
- // r4 = mapped parameter count (tagged)
// r5 = argument count (tagged)
// r7 = address of parameter map or backing store (tagged)
+ // r9 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, r6, r7, r6);
+ __ isel(eq, r4, r7, r4);
__ beq(&skip_parameter_map);
} else {
Label skip6;
__ bne(&skip6);
- // Move backing store address to r6, because it is
+ // Move backing store address to r4, because it is
// expected there when filling in the unmapped arguments.
- __ mr(r6, r7);
+ __ mr(r4, r7);
__ b(&skip_parameter_map);
__ bind(&skip6);
}
- __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
__ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
r0);
- __ SmiToPtrArrayOffset(r9, r4);
- __ add(r9, r7, r9);
- __ addi(r9, r9, Operand(kParameterMapHeaderSize));
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ add(r8, r8, r7);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize));
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
r0);
// Copy the parameter slots and the holes in the arguments.
@@ -1886,72 +1782,72 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mr(r9, r4);
- __ LoadP(r11, MemOperand(sp, 0 * kPointerSize));
- __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ sub(r11, r11, r4);
- __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r6, r9);
- __ add(r6, r7, r6);
- __ addi(r6, r6, Operand(kParameterMapHeaderSize));
-
- // r9 = loop variable (tagged)
- // r4 = mapping index (tagged)
- // r6 = address of backing store (tagged)
+ Label parameters_loop;
+ __ mr(r8, r9);
+ __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r11, r11, r9);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r4, r8);
+ __ add(r4, r4, r7);
+ __ addi(r4, r4, Operand(kParameterMapHeaderSize));
+
+ // r4 = address of backing store (tagged)
// r7 = address of parameter map (tagged)
// r8 = temporary scratch (a.o., for address calculation)
- // r10 = the hole value
- __ b(&parameters_test);
+ // r10 = temporary scratch (a.o., for address calculation)
+ // ip = the hole value
+ __ SmiUntag(r8);
+ __ mtctr(r8);
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r10, r4, r8);
+ __ add(r8, r7, r8);
+ __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
__ bind(&parameters_loop);
- __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
- __ SmiToPtrArrayOffset(r8, r9);
- __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ StorePX(r11, MemOperand(r8, r7));
- __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ StorePX(r10, MemOperand(r8, r6));
+ __ StorePU(r11, MemOperand(r8, -kPointerSize));
+ __ StorePU(ip, MemOperand(r10, -kPointerSize));
__ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
- __ bind(&parameters_test);
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- __ bne(&parameters_loop);
+ __ bdnz(&parameters_loop);
+
+ // Restore r8 = argument count (tagged).
+ __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
__ bind(&skip_parameter_map);
- // r5 = argument count (tagged)
- // r6 = address of backing store (tagged)
- // r8 = scratch
+ // r3 = address of new object (tagged)
+ // r4 = address of backing store (tagged)
+ // r8 = argument count (tagged)
+ // r9 = mapped parameter count (tagged)
+ // r11 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
- __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
+ __ sub(r11, r8, r9, LeaveOE, SetRC);
+ __ Ret(eq, cr0);
- Label arguments_loop, arguments_test;
- __ mr(r11, r4);
- __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
- __ SmiToPtrArrayOffset(r8, r11);
- __ sub(r7, r7, r8);
- __ b(&arguments_test);
+ Label arguments_loop;
+ __ SmiUntag(r11);
+ __ mtctr(r11);
- __ bind(&arguments_loop);
- __ subi(r7, r7, Operand(kPointerSize));
- __ LoadP(r9, MemOperand(r7, 0));
- __ SmiToPtrArrayOffset(r8, r11);
- __ add(r8, r6, r8);
- __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
- __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+ __ SmiToPtrArrayOffset(r0, r9);
+ __ sub(r6, r6, r0);
+ __ add(r11, r4, r0);
+ __ addi(r11, r11,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ bind(&arguments_test);
- __ cmp(r11, r5);
- __ blt(&arguments_loop);
+ __ bind(&arguments_loop);
+ __ LoadPU(r7, MemOperand(r6, -kPointerSize));
+ __ StorePU(r7, MemOperand(r11, kPointerSize));
+ __ bdnz(&arguments_loop);
- // Return and remove the on-stack parameters.
- __ addi(sp, sp, Operand(3 * kPointerSize));
+ // Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
- // r5 = argument count (tagged)
+ // r8 = argument count (tagged)
__ bind(&runtime);
- __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(r4, r6, r8);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
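The allocation above sizes all three objects in one go: an optional parameter map covering min(parameter_count, argument_count) mapped slots, a backing store with one slot per argument, and the sloppy arguments object itself. A hedged C++ sketch of that byte-size computation, with the layout constants passed in as placeholders rather than V8's actual values:

#include <algorithm>
#include <cstddef>

size_t SloppyArgumentsAllocationSize(size_t param_count, size_t arg_count,
                                     size_t pointer_size,
                                     size_t fixed_array_header_size,
                                     size_t sloppy_arguments_object_size) {
  // Mirrors kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize.
  const size_t parameter_map_header = fixed_array_header_size + 2 * pointer_size;
  const size_t mapped_count = std::min(param_count, arg_count);
  size_t size = 0;
  // 1. Parameter map, only present when there are mapped parameters.
  if (mapped_count != 0) {
    size += mapped_count * pointer_size + parameter_map_header;
  }
  // 2. Backing store: one slot per argument plus its FixedArray header.
  size += arg_count * pointer_size + fixed_array_header_size;
  // 3. The sloppy arguments object itself.
  size += sloppy_arguments_object_size;
  return size;
}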
@@ -1980,43 +1876,39 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
- // Get the length from the frame.
- __ LoadP(r4, MemOperand(sp, 0));
- __ b(&try_allocate);
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&try_allocate);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r4, MemOperand(sp, 0));
- __ SmiToPtrArrayOffset(r6, r4);
- __ add(r6, r5, r6);
+ __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmpi(r4, Operand::Zero());
- __ beq(&add_arguments_object);
- __ SmiUntag(r4);
- __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ SmiUntag(r11, r5, SetRC);
+ __ beq(&add_arguments_object, cr0);
+ __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(r4, r3, r5, r6, &runtime,
+ __ Allocate(r11, r3, r7, r8, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -2028,88 +1920,54 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(r4);
- __ StoreP(r4,
+ __ AssertSmi(r5);
+ __ StoreP(r5,
FieldMemOperand(r3, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
r0);
// If there are no actual arguments, we're done.
- Label done;
- __ cmpi(r4, Operand::Zero());
- __ beq(&done);
-
- // Get the parameters pointer from the stack.
- __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
+ __ SmiUntag(r9, r5, SetRC);
+ __ Ret(eq, cr0);
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
__ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
- // Untag the length for the loop.
- __ SmiUntag(r4);
+ __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
// Copy the fixed array slots.
Label loop;
// Set up r7 to point just prior to the first array slot.
__ addi(r7, r7,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r4);
+ __ mtctr(r9);
__ bind(&loop);
- // Pre-decrement r5 with kPointerSize on each iteration.
+ // Pre-decrement r6 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ LoadPU(r8, MemOperand(r6, -kPointerSize));
// Pre-increment r7 with kPointerSize on each iteration.
- __ StorePU(r6, MemOperand(r7, kPointerSize));
+ __ StorePU(r8, MemOperand(r7, kPointerSize));
__ bdnz(&loop);
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ addi(sp, sp, Operand(3 * kPointerSize));
+ // Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(r4, r6, r5);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
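The copy loop above keeps the count in CTR and uses LoadPU/StorePU: the source pointer is pre-decremented so the receiver slot is skipped, and the destination is pre-incremented starting one slot before the first element. A rough C++ equivalent, for illustration only:

void CopyArgumentsToElements(void** params, void** first_element, int count) {
  void** src = params;             // r6: parameters pointer (just above the args)
  void** dst = first_element - 1;  // r7: set just prior to the first array slot
  for (int i = 0; i < count; ++i) {
    *(++dst) = *(--src);           // StorePU / LoadPU; bdnz drives the count
  }
}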
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // Stack layout on entry.
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
-
- Label runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
- __ SmiToPtrArrayOffset(r6, r4);
- __ add(r6, r5, r6);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 3 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2605,27 +2463,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
- __ bne(FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
- __ bne(&miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&megamorphic);
- __ b(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(&megamorphic);
+ __ b(&done);
__ bind(&miss);
@@ -2645,24 +2501,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ b(&done);
-
- __ bind(&not_array_function);
- }
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(&not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
+  // The target function is the Array constructor.
+  // Create an AllocationSite if we don't already have it and store it in
+  // the slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ b(&done);
+
+ __ bind(&not_array_function);
+
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
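With the pretenuring special case removed, the feedback update above reduces to a single decision tree. An illustrative outline, with plain booleans standing in for the actual slot inspection (not V8's types): an AllocationSite slot stays monomorphic only for the Array function, any other mismatch goes megamorphic, and an uninitialized slot is filled with an AllocationSite for Array() or a WeakCell for any other target.

enum class SlotAction { kKeep, kGoMegamorphic, kCreateAllocationSite, kCreateWeakCell };

SlotAction UpdateCallFeedback(bool slot_is_uninitialized,
                              bool slot_is_allocation_site,
                              bool target_is_array_function) {
  if (slot_is_allocation_site) {
    return target_is_array_function ? SlotAction::kKeep
                                    : SlotAction::kGoMegamorphic;
  }
  if (!slot_is_uninitialized) return SlotAction::kGoMegamorphic;
  return target_is_array_function ? SlotAction::kCreateAllocationSite
                                  : SlotAction::kCreateWeakCell;
}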
@@ -2692,30 +2546,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
- // Check for function proxy.
- STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
- __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
- __ bne(non_function);
- __ push(r4); // put proxy as additional argument
- __ li(r3, Operand(argc + 1));
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0);
- __ li(r3, Operand(argc)); // Set up the number of arguments.
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2737,12 +2570,12 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
bool needs_checks, bool call_as_method) {
// r4 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &non_function);
+ __ JumpIfSmi(r4, &slow);
// Goto slow case if we do not have a function.
__ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
@@ -2777,7 +2610,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2798,38 +2631,31 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
// r7 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(r4, &non_function_call);
+ __ JumpIfSmi(r4, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&slow);
+ __ bne(&non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into r5.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by r6 + 1.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
} else {
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
}
__ AssertUndefinedOrAllocationSite(r5, r8);
@@ -2842,85 +2668,42 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mr(r6, r4);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = r7;
- __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(jmp_reg,
- FieldMemOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset));
- __ addi(ip, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
- // r3: number of arguments
- // r4: called object
- // r8: object type
- Label do_call;
- __ bind(&slow);
- STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
- __ bne(&non_function_call);
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ b(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r3).
- __ li(r5, Operand::Zero());
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(vector,
- FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ __ mr(r6, r4);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r4 - function
// r6 - slot id
// r5 - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
- __ cmp(r4, r7);
- __ bne(&miss);
+ // r7 - allocation site (loaded from vector[slot])
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(miss);
__ mov(r3, Operand(arg_count()));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r5, r9);
- __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
-
- // Verify that r7 contains an AllocationSite
- __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ bne(&miss);
// Increment the call count for monomorphic function calls.
const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ LoadP(r6, FieldMemOperand(r9, count_offset));
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r5, r5, r8);
+ __ LoadP(r6, FieldMemOperand(r5, count_offset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
- __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
+ __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
__ mr(r5, r7);
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
@@ -2933,7 +2716,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2987,7 +2770,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2995,11 +2778,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&slow_start);
+ // Verify that r7 contains an AllocationSite
+ __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3072,7 +2865,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
// r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &non_function);
+ __ JumpIfSmi(r4, &slow);
// Goto slow case if we do not have a function.
__ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
@@ -3088,10 +2881,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -3499,15 +3289,10 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ blr();
__ bind(&not_smi);
- Label not_heap_number;
- __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- // r3: object
- // r4: instance type.
- __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ blr();
- __ bind(&not_heap_number);
+ __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(eq);
Label not_string, slow_string;
__ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
@@ -3531,7 +3316,37 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in r3.
+ Label is_number;
+ __ JumpIfSmi(r3, &is_number);
+
+ __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(lt);
+
+ Label not_heap_number;
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
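The new ToStringStub dispatches in the order shown above: smis and heap numbers go through NumberToStringStub, strings return unchanged, oddballs return their cached string representation, and everything else falls back to Runtime::kToString. A trivial sketch of that classification, using illustrative flags rather than V8's type checks:

enum class ToStringPath { kNumberToString, kAlreadyString, kOddballCache, kRuntime };

ToStringPath ClassifyForToString(bool is_smi, bool is_string,
                                 bool is_heap_number, bool is_oddball) {
  if (is_smi || is_heap_number) return ToStringPath::kNumberToString;
  if (is_string) return ToStringPath::kAlreadyString;
  if (is_oddball) return ToStringPath::kOddballCache;
  return ToStringPath::kRuntime;
}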
@@ -3655,39 +3470,37 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ LoadP(r3, MemOperand(sp)); // Load right in r3, left in r4.
- __ LoadP(r4, MemOperand(sp, kPointerSize));
+ // ----------- S t a t e -------------
+ // -- r4 : left
+ // -- r3 : right
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertString(r4);
+ __ AssertString(r3);
Label not_same;
__ cmp(r3, r4);
__ bne(&not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ IncrementCounter(counters->string_compare_native(), 1, r4, r5);
- __ addi(sp, sp, Operand(2 * kPointerSize));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
+ r5);
__ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
- // Compare flat one-byte strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r5, r6);
- __ addi(sp, sp, Operand(2 * kPointerSize));
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
+ r6);
StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ Push(r4, r3);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
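StringCompareStub now takes its operands in r4/r3 instead of on the stack, but the result contract is unchanged: identical strings are EQUAL, flat one-byte pairs are compared inline, and anything else is pushed and handed to Runtime::kStringCompare. A plain C++ sketch of the inline comparison semantics, assuming raw one-byte data rather than V8's string machinery:

#include <cstddef>

int CompareFlatOneByteStrings(const unsigned char* left, size_t left_len,
                              const unsigned char* right, size_t right_len) {
  if (left == right && left_len == right_len) return 0;  // identical: EQUAL
  size_t n = left_len < right_len ? left_len : right_len;
  for (size_t i = 0; i < n; ++i) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;  // LESS / GREATER
  }
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;  // a proper prefix compares less
}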
@@ -3723,6 +3536,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
+ __ AssertSmi(r4);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ AssertSmi(r3);
+ }
+ __ sub(r3, r4, r3);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
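For the new BOOLEAN CompareIC state, ordered comparisons first load each boolean's cached ToNumber value (the smis 1 and 0) and the stub then returns left minus right, so the caller reads the result from the sign. A plain-integer sketch of that convention, with ints standing in for smis:

int CompareBooleansForOrdering(bool left, bool right) {
  int l = left ? 1 : 0;   // Oddball::kToNumberOffset caches 1 for true, 0 for false
  int r = right ? 1 : 0;
  return l - r;           // < 0: less, 0: equal, > 0: greater
}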
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -4033,8 +3870,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ cmp(r6, r7);
__ bne(&miss);
- __ sub(r3, r3, r4);
- __ Ret();
+ if (Token::IsEqualityOp(op())) {
+ __ sub(r3, r3, r4);
+ __ Ret();
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
+ } else {
+ __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
+ }
+ __ Push(r4, r3, r5);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4607,33 +4456,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r5);
+ __ EmitLoadTypeFeedbackVector(r5);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r5);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4642,11 +4484,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4763,8 +4604,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, r10, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@@ -4824,8 +4664,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r10, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4845,8 +4684,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ add(feedback, vector, r0);
__ LoadP(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r10, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4858,14 +4696,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4882,11 +4720,52 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
+ Register key = VectorStoreICDescriptor::NameRegister(); // r5
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+ Register feedback = r8;
+ Register receiver_map = r9;
+ Register scratch1 = r10;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ Register scratch2 = r11;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
}
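The previously stubbed-out VectorStoreICStub now mirrors the load IC dispatch: try the monomorphic weak cell first, then a polymorphic FixedArray of handlers, then the megamorphic stub-cache probe, and finally the miss handler. As an outline, with illustrative flags only:

enum class StoreICPath { kMonomorphicHandler, kPolymorphicArray,
                         kMegamorphicStubCache, kMiss };

StoreICPath ClassifyStoreFeedback(bool weak_cell_matches_receiver_map,
                                  bool feedback_is_fixed_array,
                                  bool feedback_is_megamorphic_symbol) {
  if (weak_cell_matches_receiver_map) return StoreICPath::kMonomorphicHandler;
  if (feedback_is_fixed_array) return StoreICPath::kPolymorphicArray;
  if (feedback_is_megamorphic_symbol) return StoreICPath::kMegamorphicStubCache;
  return StoreICPath::kMiss;
}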
@@ -4900,12 +4779,135 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+ __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  //                 0      1     2             len-1
+  //                 ^                              ^
+  //                 |                              |
+  //            pointer_reg                      too_far
+  //            aka feedback                     scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ SmiToPtrArrayOffset(r0, too_far);
+ __ add(too_far, feedback, r0);
+ __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ LoadP(cached_map, MemOperand(pointer_reg));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ bne(&prepare_next);
+ // Is it a transitioning store?
+ __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ bne(&transition_call);
+ __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&transition_call);
+ __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mr(feedback, too_far);
+
+ __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&prepare_next);
+ __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ cmpl(pointer_reg, too_far);
+ __ blt(&next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ b(miss);
+}
+
+
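The scan above walks (weak map, transition-or-undefined, handler) triples until either a map matches or too_far is reached. A hedged C++ sketch of that search, with raw pointers standing in for weak cells and code objects:

struct StoreEntry {
  const void* weak_map;    // wmN: weak cell holding a map
  const void* transition;  // wtN: transition map, or undefined for a plain store
  const void* handler;     // hN:  handler code
};

const StoreEntry* FindStoreHandler(const StoreEntry* entries, int count,
                                   const void* receiver_map) {
  for (int i = 0; i < count; ++i) {  // the 3 * kPointerSize step in the loop above
    if (entries[i].weak_map == receiver_map) return &entries[i];
  }
  return nullptr;  // exhausted the array: branch to miss
}

When the matching entry carries a transition, the generated code additionally moves the transition map into VectorStoreTransitionDescriptor::MapRegister() before jumping to the handler.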
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
+ Register key = VectorStoreICDescriptor::NameRegister(); // r5
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+ Register feedback = r8;
+ Register receiver_map = r9;
+ Register scratch1 = r10;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+
+ Register scratch2 = r11;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ bne(&miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 85f3c9ca98..bc6c26b217 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -147,8 +147,9 @@ class RecordWriteStub : public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start() + Assembler::kInstrSize,
- 2 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(),
+ stub->instruction_start() + Assembler::kInstrSize,
+ 2 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 2238695587..b313d11bb3 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/codegen-ppc.h"
+
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
@@ -60,7 +62,7 @@ UnaryMathFunction CreateExpFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
@@ -97,7 +99,7 @@ UnaryMathFunction CreateSqrtFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
@@ -658,7 +660,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index 079aa23403..a42fa53960 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -8,26 +8,12 @@
#include "src/assembler.h"
#include "src/macro-assembler.h"
-#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* buffer, size_t size) {
- // Nothing to do flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if defined(USE_SIMULATOR)
- // Not generating PPC instructions for C-code. This means that we are
- // building an PPC emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), buffer, size);
-#else
-
+#if !defined(USE_SIMULATOR)
if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
__asm__ __volatile__(
"sync \n"
@@ -54,7 +40,7 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
: "r"(pointer));
}
-#endif // USE_SIMULATOR
+#endif // !USE_SIMULATOR
}
} // namespace internal
} // namespace v8
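The simulator branch is dropped here in favour of the isolate-aware Assembler::FlushICache entry points used elsewhere in this patch. For code outside V8 that needs the same effect after writing instructions, a hedged, portable sketch is the compiler builtin rather than a hand-rolled sync/icbi sequence (GCC and Clang provide __builtin___clear_cache; this is not what V8 itself calls):

#include <stddef.h>

// Portable illustration only: the builtin performs the data/instruction cache
// maintenance appropriate for the target architecture.
static inline void FlushCodeRange(void* start, size_t size) {
  char* begin = static_cast<char*>(start);
  __builtin___clear_cache(begin, begin + size);
}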
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index 4437a167fc..e86ec681ec 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -10,6 +10,7 @@
#include "src/ppc/assembler-ppc.h"
#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/frames-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 9f8292f96b..c123e7c602 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -31,6 +31,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r7; }
const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return r7; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r8; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return r6; }
@@ -41,14 +46,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-const Register InstanceofDescriptor::left() { return r3; }
-const Register InstanceofDescriptor::right() { return r4; }
+const Register InstanceOfDescriptor::LeftRegister() { return r4; }
+const Register InstanceOfDescriptor::RightRegister() { return r3; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return r4; }
+const Register StringCompareDescriptor::RightRegister() { return r3; }
const Register ArgumentsAccessReadDescriptor::index() { return r4; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
+const Register ArgumentsAccessNewDescriptor::function() { return r4; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
+
+
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -64,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -94,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
@@ -181,6 +199,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
@@ -362,6 +389,17 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // argument count (including receiver)
+ r5, // address of first argument
+      r4   // the target callable to be called
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
} // namespace internal
} // namespace v8
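As a rough illustration of the pattern these descriptor hunks follow (each parameter pinned to a fixed PPC register, handed to the platform-independent side as an array), here is a self-contained sketch; Register and CallInterfaceDescriptorData below are simplified stand-ins, not V8's classes.

#include <cstddef>
#include <string>
#include <vector>

struct Register { std::string name; };  // stand-in for V8's Register

struct CallInterfaceDescriptorData {
  std::vector<Register> registers;
  void InitializePlatformSpecific(size_t count, const Register* regs) {
    registers.assign(regs, regs + count);  // record the calling convention
  }
};

// Mirrors the shape of CallTrampolineDescriptor above: the callee goes in r4
// and the argument count in r3.
void InitializeCallTrampoline(CallInterfaceDescriptorData* data) {
  Register registers[] = {{"r4"}, {"r3"}};
  data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                   registers);
}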
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index de416b3fdb..ad6d8db13d 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -5,12 +5,12 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -120,8 +120,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info_->MayUseThis() &&
- !info_->is_native() && info_->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset));
@@ -177,16 +176,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r4.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(r4);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -194,7 +204,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(r4);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in both r3 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ mr(cp, r3);
@@ -227,13 +238,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
@@ -750,7 +755,6 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -980,11 +984,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2180,6 +2179,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
+}
+
+
+template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
int false_block = instr->FalseDestination(chunk_);
__ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
@@ -2530,42 +2536,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input, Register temp1,
- Label* is_not_object, Label* is_object) {
- Register temp2 = scratch0();
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ cmp(input, temp2);
- __ beq(is_object);
-
- // Load map.
- __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ TestBit(temp2, Map::kIsUndetectable, r0);
- __ bne(is_not_object, cr0);
-
- // Load instance type and check that it is in object type range.
- __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ blt(is_not_object);
- __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
- instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input, Register temp1,
Label* is_not_string,
SmiCheck check_needed = INLINE_SMI_CHECK) {
@@ -2635,17 +2605,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
+ DCHECK(ToRegister(instr->left()).is(r4));
+ DCHECK(ToRegister(instr->right()).is(r3));
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
__ cmpi(r3, Operand::Zero());
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(instr, condition);
+ EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
@@ -2795,157 +2762,42 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
- DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
-
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(r3));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ mov(r4, Operand(factory()->true_value()));
- __ mov(r5, Operand(factory()->false_value()));
- __ cmpi(r3, Operand::Zero());
- __ isel(eq, r3, r4, r5);
- } else {
- Label equal, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&equal);
- __ mov(r3, Operand(factory()->false_value()));
- __ b(&done);
-
- __ bind(&equal);
- __ mov(r3, Operand(factory()->true_value()));
- __ bind(&done);
- }
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
- &load_bool_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- Label* load_bool() { return &load_bool_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- Label load_bool_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Block trampoline emission to ensure the positions of instructions are
- // as expected by the patcher. See InstanceofStub::Generate().
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(cell));
- __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
- __ cmp(map, ip);
- __ bc_short(ne, &cache_miss);
- __ bind(deferred->load_bool()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ TestIfSmi(object, r0);
+ EmitFalseBranch(instr, eq, cr0);
}
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, ip);
- __ beq(&false_result);
-
- // String values is not instance of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result, cr0);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check,
- Label* bool_load) {
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(flags |
- InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
-
- __ Move(InstanceofStub::right(), instr->function());
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Handle<Code> code = stub.GetCode();
- // Include instructions below in delta: bitwise_mov32 + li + call
- int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
- // The labels must be already bound since the code has predictabel size up
- // to the call instruction.
- DCHECK(map_check->is_bound());
- DCHECK(bool_load->is_bound());
- int map_check_delta =
- masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
- int bool_load_delta =
- masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
- // r8 is the delta from our callee's lr to the location of the map check.
- __ bitwise_mov32(r8, map_check_delta + additional_delta);
- // r9 is the delta from map check to bool load.
- __ li(r9, Operand(map_check_delta - bool_load_delta));
- CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
- masm_->InstructionsGeneratedSince(map_check));
- }
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value (r3) into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(&loop);
}
@@ -3793,11 +3645,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set r3 to arguments count if adaption is not needed. Assumes that r3
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ mov(r3, Operand(arity));
- }
+ // Always initialize r3 to the number of actual arguments.
+ __ mov(r3, Operand(arity));
bool is_self_call = function.is_identical_to(info()->closure());
@@ -4210,9 +4059,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(ToRegister(instr->result()).is(r3));
- if (instr->hydrogen()->pass_argument_count()) {
- __ mov(r3, Operand(instr->arity()));
- }
+ __ mov(r3, Operand(instr->arity()));
// Change context.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
@@ -5848,7 +5695,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// r3 = regexp literal clone.
// r5 and r7-r9 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ Move(r10, instr->hydrogen()->literals());
__ LoadP(r4, FieldMemOperand(r10, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -5883,26 +5730,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ mov(r5, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r5, Operand(instr->hydrogen()->shared_info()));
- __ mov(r4, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r5, r4);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5969,24 +5796,25 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
final_branch_condition = ne;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
- __ beq(true_label);
- __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ andi(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmpi(scratch, Operand(1 << Map::kIsCallable));
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->object_string())) {
- Register map = scratch;
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ beq(true_label);
- __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
- // Check for undetectable objects => false.
- __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(false_label);
+ // Check for callable or undetectable objects => false.
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ andi(r0, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
__ cmpi(r0, Operand::Zero());
final_branch_condition = eq;
@@ -6037,7 +5865,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
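The new DoHasInPrototypeChainAndBranch above emits a tight loop over the object's prototype chain. The same control flow, sketched against an assumed toy object model rather than V8's heap layout:

struct Map;
struct HeapObject { Map* map; };
struct Map { HeapObject* prototype; };  // nullptr stands in for the null prototype

bool HasInPrototypeChain(const HeapObject* object, const HeapObject* prototype) {
  for (const Map* map = object->map;;) {
    HeapObject* object_prototype = map->prototype;
    if (object_prototype == prototype) return true;  // EmitTrueBranch(eq)
    if (object_prototype == nullptr) return false;   // EmitFalseBranch(eq) on null
    map = object_prototype->map;                     // keep walking the chain
  }
}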
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
index 392bbf5872..117dc574d5 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.h
@@ -111,8 +111,6 @@ class LCodeGen : public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
@@ -243,6 +241,8 @@ class LCodeGen : public LCodeGenBase {
template <class InstrType>
void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition,
CRegister cr = cr7);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
@@ -254,12 +254,6 @@ class LCodeGen : public LCodeGenBase {
Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
Handle<String> type_name);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input, Register temp1, Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index 6841db5d32..767c771fb3 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/lithium-ppc.h"
+
#include <sstream>
#include "src/hydrogen-osr.h"
@@ -183,13 +185,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -935,28 +930,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
- bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new (zone()) LGoto(instr->FirstSuccessor());
}
@@ -1008,19 +1000,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(
- context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, r3), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result = new (zone())
- LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
- UseFixed(instr->left(), r3), FixedTemp(r7));
- return MarkAsCall(DefineFixed(result, r3), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1745,14 +1739,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LIsObjectAndBranch(value, temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2460,13 +2446,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LFunctionLiteral(context), r3),
- instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
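The AddInstruction hunk above now plants a lazy-bailout point after LPrologue as well as after calls. A compressed sketch of that bookkeeping, using invented stand-in types rather than V8's LChunk machinery:

#include <memory>
#include <vector>

struct LInstruction {
  virtual bool IsCall() const { return false; }
  virtual bool IsPrologue() const { return false; }
  virtual ~LInstruction() = default;
};
struct LLazyBailout : LInstruction {};

struct LChunkSketch {
  std::vector<std::unique_ptr<LInstruction>> instructions;
  void AddInstruction(std::unique_ptr<LInstruction> instr) {
    bool needs_bailout = instr->IsCall() || instr->IsPrologue();
    instructions.push_back(std::move(instr));
    // Calls and the new prologue instruction may trigger lazy deoptimization,
    // so each is followed by a bailout carrying the environment to resume from.
    if (needs_bailout) {
      instructions.push_back(std::make_unique<LLazyBailout>());
    }
  }
};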
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index 99ff9fedb7..e862a11f63 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -79,19 +79,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -132,6 +130,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -231,8 +230,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -387,6 +384,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) {}
@@ -973,23 +976,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1147,41 +1133,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2487,17 +2459,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 676cb2c60e..e973471572 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -11,8 +11,8 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
+#include "src/ppc/macro-assembler-ppc.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -753,6 +753,14 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
@@ -987,10 +995,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ mov(r3, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- mov(r3, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -1005,9 +1013,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
} else {
if (actual.is_immediate()) {
+ mov(r3, Operand(actual.immediate()));
cmpi(expected.reg(), Operand(actual.immediate()));
beq(&regular_invoke);
- mov(r3, Operand(actual.immediate()));
} else {
cmp(expected.reg(), actual.reg());
beq(&regular_invoke);
@@ -1122,23 +1130,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
- Register scratch, Label* fail) {
- LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
- Label* fail) {
- lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- blt(fail);
- cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- bgt(fail);
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
Label* fail) {
DCHECK(kNotStringTag != 0);
@@ -1701,20 +1692,6 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
}
-void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
- InstanceType min_type,
- InstanceType max_type,
- Label* false_label) {
- STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
- subi(ip, ip, Operand(min_type));
- cmpli(ip, Operand(max_type - min_type));
- bgt(false_label);
-}
-
-
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
@@ -1979,36 +1956,7 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- bne(miss);
-
- LoadP(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- lwz(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kBoundFunction,
-#else
- SharedFunctionInfo::kBoundFunction + kSmiTagSize,
-#endif
- r0);
- bne(miss, cr0);
-
- // Make sure that the function has an instance prototype.
- lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- bne(&non_instance, cr0);
- }
-
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
LoadP(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2028,15 +1976,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Get the prototype from the initial map.
LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- b(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch, ip);
- }
-
// All done.
bind(&done);
}
@@ -2312,12 +2251,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(ip, id);
+ GetBuiltinEntry(ip, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(ip));
CallJSEntry(ip);
@@ -2330,21 +2269,20 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
LoadP(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ LoadP(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- LoadP(target,
- FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
- r0);
+ LoadP(target, ContextOperand(target, native_context_index), r0);
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(r4));
- GetBuiltinFunction(r4, id);
+ GetBuiltinFunction(r4, native_context_index);
// Load the code entry point from the builtins object.
LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
}
@@ -2468,6 +2406,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ LoadP(dst, GlobalObjectOperand());
+ LoadP(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
@@ -2644,6 +2588,19 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2678,78 +2635,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object, Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
- subi(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- xor_(scratch1, scratch1, scratch2);
- and_(scratch1, scratch1, mask);
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
- add(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
- lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
- fcmpu(d0, d1);
- bne(not_found); // The cache did not contain this value.
- b(&load_result_from_cache);
-
- bind(&is_smi);
- Register scratch = scratch1;
- SmiUntag(scratch, object);
- and_(scratch, mask, scratch);
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
- add(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- cmp(object, probe);
- bne(not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- LoadP(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -3244,175 +3129,6 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
}
-void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
- Register new_value) {
- lwz(scratch, MemOperand(location));
-
- if (FLAG_enable_embedded_constant_pool) {
- if (emit_debug_code()) {
- // Check that the instruction sequence is a load from the constant pool
- ExtractBitMask(scratch, scratch, 0x1f * B16);
- cmpi(scratch, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- // Scratch was clobbered. Restore it.
- lwz(scratch, MemOperand(location));
- }
- DecodeConstantPoolOffset(scratch, location);
- StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
- return;
- }
-
- // This code assumes a FIXED_SEQUENCE for lis/ori
-
- // At this point scratch is a lis instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(scratch, Operand(ADDIS), r0);
- Check(eq, kTheInstructionToPatchShouldBeALis);
- lwz(scratch, MemOperand(location));
- }
-
-// insert new high word into lis instruction
-#if V8_TARGET_ARCH_PPC64
- srdi(ip, new_value, Operand(32));
- rlwimi(scratch, ip, 16, 16, 31);
-#else
- rlwimi(scratch, new_value, 16, 16, 31);
-#endif
-
- stw(scratch, MemOperand(location));
-
- lwz(scratch, MemOperand(location, kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, kInstrSize));
- }
-
-// insert new low word into ori instruction
-#if V8_TARGET_ARCH_PPC64
- rlwimi(scratch, ip, 0, 16, 31);
-#else
- rlwimi(scratch, new_value, 0, 16, 31);
-#endif
- stw(scratch, MemOperand(location, kInstrSize));
-
-#if V8_TARGET_ARCH_PPC64
- if (emit_debug_code()) {
- lwz(scratch, MemOperand(location, 2 * kInstrSize));
- // scratch is now sldi.
- And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
- Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
- Check(eq, kTheInstructionShouldBeASldi);
- }
-
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORIS), r0);
- Check(eq, kTheInstructionShouldBeAnOris);
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- }
-
- rlwimi(scratch, new_value, 16, 16, 31);
- stw(scratch, MemOperand(location, 3 * kInstrSize));
-
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- }
- rlwimi(scratch, new_value, 0, 16, 31);
- stw(scratch, MemOperand(location, 4 * kInstrSize));
-#endif
-
-// Update the I-cache so the new lis and addic can be executed.
-#if V8_TARGET_ARCH_PPC64
- FlushICache(location, 5 * kInstrSize, scratch);
-#else
- FlushICache(location, 2 * kInstrSize, scratch);
-#endif
-}
-
-
-void MacroAssembler::GetRelocatedValue(Register location, Register result,
- Register scratch) {
- lwz(result, MemOperand(location));
-
- if (FLAG_enable_embedded_constant_pool) {
- if (emit_debug_code()) {
- // Check that the instruction sequence is a load from the constant pool
- ExtractBitMask(result, result, 0x1f * B16);
- cmpi(result, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- lwz(result, MemOperand(location));
- }
- DecodeConstantPoolOffset(result, location);
- LoadPX(result, MemOperand(kConstantPoolRegister, result));
- return;
- }
-
- // This code assumes a FIXED_SEQUENCE for lis/ori
- if (emit_debug_code()) {
- And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(result, Operand(ADDIS), r0);
- Check(eq, kTheInstructionShouldBeALis);
- lwz(result, MemOperand(location));
- }
-
- // result now holds a lis instruction. Extract the immediate.
- slwi(result, result, Operand(16));
-
- lwz(scratch, MemOperand(location, kInstrSize));
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, kInstrSize));
- }
- // Copy the low 16bits from ori instruction into result
- rlwimi(result, scratch, 0, 16, 31);
-
-#if V8_TARGET_ARCH_PPC64
- if (emit_debug_code()) {
- lwz(scratch, MemOperand(location, 2 * kInstrSize));
- // scratch is now sldi.
- And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
- Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
- Check(eq, kTheInstructionShouldBeASldi);
- }
-
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORIS), r0);
- Check(eq, kTheInstructionShouldBeAnOris);
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- }
- sldi(result, result, Operand(16));
- rldimi(result, scratch, 0, 48);
-
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- }
- sldi(result, result, Operand(16));
- rldimi(result, scratch, 0, 48);
-#endif
-}
-
-
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@@ -4040,6 +3756,25 @@ void MacroAssembler::MovDoubleToInt64(
}
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+ subi(sp, sp, Operand(kFloatSize));
+ stw(src, MemOperand(sp, 0));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfs(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kFloatSize));
+}
+
+
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+ subi(sp, sp, Operand(kFloatSize));
+ frsp(src, src);
+ stfs(src, MemOperand(sp, 0));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lwz(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kFloatSize));
+}
+
+
void MacroAssembler::Add(Register dst, Register src, intptr_t value,
Register scratch) {
if (is_int16(value)) {
@@ -4601,7 +4336,7 @@ CodePatcher::CodePatcher(byte* address, int instructions,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
}
// Check that the code was patched as expected.
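The new MovIntToFloat/MovFloatToInt helpers above bounce the value through a stack slot because the targeted PPC cores lack a direct GPR-to-FPR move; the net effect is a bit-level reinterpretation of a 32-bit value. A portable sketch of that effect, with memcpy standing in for the stw/lfs and stfs/lwz pairs:

#include <cstdint>
#include <cstring>

float IntBitsToFloat(uint32_t src) {   // roughly: stw src; lfs dst
  float dst;
  std::memcpy(&dst, &src, sizeof(dst));
  return dst;
}

uint32_t FloatBitsToInt(float src) {   // roughly: frsp src; stfs src; lwz dst
  uint32_t dst;
  std::memcpy(&dst, &src, sizeof(dst));
  return dst;
}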
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 64396bb3a4..f87c563e72 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -415,6 +415,9 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -494,6 +497,8 @@ class MacroAssembler : public Assembler {
Register dst_hi,
#endif
Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src);
+ void MovFloatToInt(Register dst, DoubleRegister src);
void Add(Register dst, Register src, intptr_t value, Register scratch);
void Cmpi(Register src1, const Operand& src2, Register scratch,
@@ -549,11 +554,6 @@ class MacroAssembler : public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void IsObjectJSObjectType(Register heap_object, Register map,
- Register scratch, Label* fail);
-
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
void IsObjectJSStringType(Register object, Register scratch, Label* fail);
void IsObjectNameType(Register object, Register scratch, Label* fail);
@@ -704,8 +704,7 @@ class MacroAssembler : public Assembler {
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss,
- bool miss_on_bound_function = false);
+ Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -718,13 +717,6 @@ class MacroAssembler : public Assembler {
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
- // Compare object type for heap object. Branch to false_label if type
- // is lower than min_type or greater than max_type.
- // Load map into the register map.
- void CheckObjectTypeRange(Register heap_object, Register map,
- InstanceType min_type, InstanceType max_type,
- Label* false_label);
-
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -794,7 +786,23 @@ class MacroAssembler : public Assembler {
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(r0, index);
+ Push(r0);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ CompareRoot(with, index);
+ beq(if_equal);
+ }
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(with, index);
+ bne(if_not_equal);
+ }
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
@@ -888,21 +896,9 @@ class MacroAssembler : public Assembler {
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
- void RetOnOverflow(void) {
- Label label;
+ void RetOnOverflow(void) { Ret(lt, cr0); }
- blt(&label, cr0);
- Ret();
- bind(&label);
- }
-
- void RetOnNoOverflow(void) {
- Label label;
-
- bge(&label, cr0);
- Ret();
- bind(&label);
- }
+ void RetOnNoOverflow(void) { Ret(ge, cr0); }
// ---------------------------------------------------------------------------
// Runtime calls
@@ -984,17 +980,16 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1314,6 +1309,8 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1331,15 +1328,6 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// String utilities
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object, Register result,
- Register scratch1, Register scratch2,
- Register scratch3, Label* not_found);
-
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
@@ -1378,11 +1366,6 @@ class MacroAssembler : public Assembler {
// Caller must place the instruction word at <location> in <result>.
void DecodeConstantPoolOffset(Register result, Register location);
- // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
- void GetRelocatedValue(Register location, Register result, Register scratch);
- void SetRelocatedValue(Register location, Register scratch,
- Register new_value);
-
void ClampUint8(Register output_reg, Register input_reg);
// Saturate a value into 8-bit unsigned integer
@@ -1433,6 +1416,9 @@ class MacroAssembler : public Assembler {
DecodeFieldToSmi<Field>(reg, reg);
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
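The EmitLoadTypeFeedbackVector helper declared above (and defined in macro-assembler-ppc.cc earlier in this diff) is a three-hop pointer chase. Sketched here with a hypothetical layout, not V8's real field offsets:

struct TypeFeedbackVector {};
struct SharedFunctionInfo { TypeFeedbackVector* feedback_vector; };
struct JSFunction { SharedFunctionInfo* shared; };
struct JavaScriptFrame { JSFunction* function; };

TypeFeedbackVector* LoadTypeFeedbackVector(const JavaScriptFrame* fp) {
  JSFunction* function = fp->function;            // MemOperand(fp, kFunctionOffset)
  SharedFunctionInfo* shared = function->shared;  // kSharedFunctionInfoOffset
  return shared->feedback_vector;                 // kFeedbackVectorOffset
}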