aboutsummaryrefslogtreecommitdiff
path: root/deps/v8/src/code-stubs.cc
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2016-09-06 22:49:51 +0200
committerMichaël Zasso <targos@protonmail.com>2016-09-22 09:51:19 +0200
commitec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
treeca3068017254f238cf413a451c57a803572983a4 /deps/v8/src/code-stubs.cc
parentd2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
downloadandroid-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.gz
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.bz2
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.zip
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch. deps: edit V8 gitignore to allow trace event copy deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8 deps: edit V8 gitignore to allow gtest_prod.h copy deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87 PR-URL: https://github.com/nodejs/node/pull/8317 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/code-stubs.cc')
-rw-r--r--deps/v8/src/code-stubs.cc3579
1 files changed, 2957 insertions, 622 deletions
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 60b350cd93..2b71716dc3 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -8,14 +8,13 @@
#include "src/bootstrapper.h"
#include "src/code-factory.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -26,9 +25,9 @@ RUNTIME_FUNCTION(UnexpectedStubMiss) {
return Smi::FromInt(0);
}
-
CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
- : call_descriptor_(stub->GetCallInterfaceDescriptor()),
+ : isolate_(stub->isolate()),
+ call_descriptor_(stub->GetCallInterfaceDescriptor()),
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
@@ -38,9 +37,9 @@ CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
stub->InitializeDescriptor(this);
}
-
CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
- : stack_parameter_count_(no_reg),
+ : isolate_(isolate),
+ stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(NULL),
@@ -83,8 +82,8 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
std::ostringstream os;
os << *this;
PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, AbstractCode::cast(*code),
- os.str().c_str()));
+ CodeCreateEvent(CodeEventListener::STUB_TAG,
+ AbstractCode::cast(*code), os.str().c_str()));
Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef DEBUG
@@ -99,8 +98,7 @@ Code::Kind CodeStub::GetCodeKind() const {
Code::Flags CodeStub::GetCodeFlags() const {
- return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState(),
- GetStubType());
+ return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
}
@@ -135,11 +133,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- GetCodeKind(),
- GetICState(),
- GetExtraICState(),
- GetStubType());
+ Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -276,6 +270,7 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
+ if (FLAG_minimal) return;
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
@@ -295,6 +290,7 @@ void BinaryOpICStub::PrintState(std::ostream& os) const { // NOLINT
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
const BinaryOpICState& state) {
+ if (FLAG_minimal) return;
BinaryOpICStub stub(isolate, state);
stub.GetCode();
}
@@ -377,45 +373,6 @@ Condition CompareICStub::GetCondition() const {
}
-void CompareICStub::AddToSpecialCache(Handle<Code> new_object) {
- DCHECK(*known_map_ != NULL);
- Isolate* isolate = new_object->GetIsolate();
- Factory* factory = isolate->factory();
- return Map::UpdateCodeCache(known_map_,
- strict() ?
- factory->strict_compare_ic_string() :
- factory->compare_ic_string(),
- new_object);
-}
-
-
-bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
- Factory* factory = isolate()->factory();
- Code::Flags flags = Code::ComputeFlags(
- GetCodeKind(),
- UNINITIALIZED);
- Handle<Object> probe(
- known_map_->FindInCodeCache(
- strict() ?
- *factory->strict_compare_ic_string() :
- *factory->compare_ic_string(),
- flags),
- isolate());
- if (probe->IsCode()) {
- *code_out = Code::cast(*probe);
-#ifdef DEBUG
- CompareICStub decode((*code_out)->stub_key(), isolate());
- DCHECK(op() == decode.op());
- DCHECK(left() == decode.left());
- DCHECK(right() == decode.right());
- DCHECK(state() == decode.state());
-#endif
- return true;
- }
- return false;
-}
-
-
void CompareICStub::Generate(MacroAssembler* masm) {
switch (state()) {
case CompareICState::UNINITIALIZED:
@@ -452,44 +409,109 @@ void CompareICStub::Generate(MacroAssembler* masm) {
}
}
-
Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator());
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
- GetCodeFlags(), name);
+ CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
+ name);
GenerateAssembly(&assembler);
return assembler.GenerateCode();
}
-void AllocateHeapNumberStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void LoadICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* result = assembler->AllocateHeapNumber();
- assembler->Return(result);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->LoadIC(&p);
}
-void AllocateMutableHeapNumberStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* result = assembler->Allocate(HeapNumber::kSize);
- assembler->StoreMapNoWriteBarrier(
- result,
- assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->LoadIC(&p);
+}
+
+void LoadGlobalICTrampolineStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+ vector);
+ assembler->LoadGlobalIC(&p);
+}
+
+void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+ vector);
+ assembler->LoadGlobalIC(&p);
+}
+
+void KeyedLoadICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->KeyedLoadIC(&p);
+}
+
+void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->KeyedLoadIC(&p);
+}
+
+void AllocateHeapNumberStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* result = assembler->AllocateHeapNumber();
assembler->Return(result);
}
#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::GenerateAssembly( \
- compiler::CodeStubAssembler* assembler) const { \
- compiler::Node* result = assembler->Allocate( \
- Simd128Value::kSize, compiler::CodeStubAssembler::kNone); \
- compiler::Node* map_offset = \
- assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
- compiler::Node* map = assembler->IntPtrAdd(result, map_offset); \
+ void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
+ const { \
+ compiler::Node* result = \
+ assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
+ compiler::Node* map = assembler->LoadMap(result); \
assembler->StoreNoWriteBarrier( \
MachineRepresentation::kTagged, map, \
assembler->HeapConstant(isolate()->factory()->type##_map())); \
@@ -498,22 +520,20 @@ void AllocateMutableHeapNumberStub::GenerateAssembly(
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
-void StringLengthStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
compiler::Node* value = assembler->Parameter(0);
- compiler::Node* string =
- assembler->LoadObjectField(value, JSValue::kValueOffset);
- compiler::Node* result =
- assembler->LoadObjectField(string, String::kLengthOffset);
+ compiler::Node* string = assembler->LoadJSValueValue(value);
+ compiler::Node* result = assembler->LoadStringLength(string);
assembler->Return(result);
}
-void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left, compiler::Node* right,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point addition.
Label do_fadd(assembler);
@@ -523,11 +543,14 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ Label loop(assembler, 2, loop_vars), end(assembler),
+ string_add_convert_left(assembler, Label::kDeferred),
+ string_add_convert_right(assembler, Label::kDeferred);
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
@@ -564,13 +587,14 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
}
assembler->Bind(&if_notoverflow);
- assembler->Return(assembler->Projection(0, pair));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotsmi);
{
// Load the map of {rhs}.
- Node* rhs_map = assembler->LoadObjectField(rhs, HeapObject::kMapOffset);
+ Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
Label if_rhsisnumber(assembler),
@@ -601,11 +625,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisstring);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
}
assembler->Bind(&if_rhsisnotstring);
@@ -622,9 +644,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_rhs.Bind(
- assembler->CallRuntime(Runtime::kToPrimitive, context, rhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -655,11 +677,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisstring);
{
- // Convert {rhs} to a String (using the sequence of ToPrimitive with
- // no hint followed by ToString) and concatenate the strings.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_right);
}
assembler->Bind(&if_lhsisnotstring);
@@ -702,9 +722,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(
- assembler->CallRuntime(Runtime::kToPrimitive, context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -733,11 +753,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisstring);
{
- // Convert {lhs} to a String (using the sequence of ToPrimitive with
- // no hint followed by ToString) and concatenate the strings.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
}
assembler->Bind(&if_rhsisnotstring);
@@ -781,9 +799,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here too.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -812,9 +830,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -832,9 +850,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here too.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -853,6 +871,27 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
}
}
}
+ assembler->Bind(&string_add_convert_left);
+ {
+ // Convert {lhs}, which is a Smi, to a String and concatenate the
+ // resulting string with the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&string_add_convert_right);
+ {
+ // Convert {lhs}, which is a Smi, to a String and concatenate the
+ // resulting string with the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
assembler->Bind(&do_fadd);
{
@@ -860,31 +899,169 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
Node* rhs_value = var_fadd_rhs.value();
Node* value = assembler->Float64Add(lhs_value, rhs_value);
Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ var_result.Bind(result);
+ assembler->Goto(&end);
}
+ assembler->Bind(&end);
+ return var_result.value();
}
-void SubtractStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* AddWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
- Node* context = assembler->Parameter(2);
+ // Shared entry for floating point addition.
+ Label do_fadd(assembler), end(assembler),
+ call_add_stub(assembler, Label::kDeferred);
+ Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
+ var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_result(assembler, MachineRepresentation::kTagged);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try fast Smi addition first.
+ Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi additon overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_add_stub);
+
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+ assembler->GotoUnless(
+ assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
+ &call_add_stub);
+
+ // Check if the {rhs} is Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_add_stub);
+
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+ }
+
+ assembler->Bind(&do_fadd);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value =
+ assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_add_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Add(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point subtraction.
- Label do_fsub(assembler);
+ Label do_fsub(assembler), end(assembler);
Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
@@ -922,7 +1099,8 @@ void SubtractStub::GenerateAssembly(
}
assembler->Bind(&if_notoverflow);
- assembler->Return(assembler->Projection(0, pair));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotsmi);
@@ -948,7 +1126,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1004,7 +1183,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1014,7 +1194,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_lhsisnotnumber);
{
// Convert the {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -1026,51 +1207,1626 @@ void SubtractStub::GenerateAssembly(
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+ assembler->Bind(&end);
+ return var_result.value();
+}
+
+// static
+compiler::Node* SubtractWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point subtraction.
+ Label do_fsub(assembler), end(assembler),
+ call_subtract_stub(assembler, Label::kDeferred);
+ Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
+ var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_result(assembler, MachineRepresentation::kTagged);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try a fast Smi subtraction first.
+ Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ // lhs, rhs - smi and result - number. combined - number.
+ // The result doesn't fit into Smi range.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ // lhs, rhs, result smi. combined - smi.
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of the {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if the {lhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+ }
+
+ assembler->Bind(&do_fsub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* lhs_value = var_fsub_lhs.value();
+ Node* rhs_value = var_fsub_rhs.value();
+ Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_subtract_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Subtract(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* MultiplyStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point multiplication.
+ Label do_fmul(assembler), return_result(assembler);
+ Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_rhs_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_variables);
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+ assembler->Bind(&lhs_is_smi);
+ {
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+ // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
+ // in case of overflow.
+ var_result.Bind(assembler->SmiMul(lhs, rhs));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Label rhs_is_number(assembler),
+ rhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &rhs_is_number, &rhs_is_not_number);
+
+ assembler->Bind(&rhs_is_number);
+ {
+ // Convert {lhs} to a double and multiply it with the value of {rhs}.
+ var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_number);
+ {
+ // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label lhs_is_number(assembler),
+ lhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &lhs_is_number, &lhs_is_not_number);
+
+ assembler->Bind(&lhs_is_number);
+ {
+ // Check if {rhs} is a Smi.
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+ // Convert {rhs} to a double and multiply it with the value of {lhs}.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Label rhs_is_number(assembler),
+ rhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &rhs_is_number, &rhs_is_not_number);
+
+ assembler->Bind(&rhs_is_number);
+ {
+ // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+ // multiply them.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_number);
+ {
+ // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_number);
+ {
+ // Convert {lhs} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fmul);
+ {
+ Node* value =
+ assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ var_result.Bind(result);
+ assembler->Goto(&return_result);
}
+
+ assembler->Bind(&return_result);
+ return var_result.value();
}
-void BitwiseAndStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* MultiplyWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ // Shared entry point for floating point multiplication.
+ Label do_fmul(assembler), end(assembler),
+ call_multiply_stub(assembler, Label::kDeferred);
+ Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_rhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+ assembler->Bind(&lhs_is_smi);
+ {
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+ // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
+ // in case of overflow.
+ var_result.Bind(assembler->SmiMul(lhs, rhs));
+ var_type_feedback.Bind(assembler->Select(
+ assembler->WordIsSmi(var_result.value()),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber),
+ MachineRepresentation::kWord32));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_multiply_stub);
+
+ // Convert {lhs} to a double and multiply it with the value of {rhs}.
+ var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
+ &call_multiply_stub);
+
+ // Check if {rhs} is a Smi.
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+ // Convert {rhs} to a double and multiply it with the value of {lhs}.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_multiply_stub);
+
+ // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+ // multiply them.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+ }
+
+ assembler->Bind(&do_fmul);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value =
+ assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_multiply_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Multiply(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point division.
+ Label do_fdiv(assembler), end(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_dividend(assembler, MachineRepresentation::kTagged),
+ var_divisor(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_dividend, &var_divisor};
+ Label loop(assembler, 2, loop_variables);
+ var_dividend.Bind(left);
+ var_divisor.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* dividend = var_dividend.value();
+ Node* divisor = var_divisor.value();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ Label bailout(assembler);
+
+ // Do floating point division if {divisor} is zero.
+ assembler->GotoIf(
+ assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+
+        // Do floating point division if {dividend} is zero and {divisor} is
+        // negative.
+ Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+ assembler->Branch(
+ assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ &dividend_is_zero, &dividend_is_not_zero);
+
+ assembler->Bind(&dividend_is_zero);
+ {
+ assembler->GotoIf(
+ assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+ assembler->Goto(&dividend_is_not_zero);
+ }
+ assembler->Bind(&dividend_is_not_zero);
+
+ Node* untagged_divisor = assembler->SmiUntag(divisor);
+ Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+ // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
+ // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(assembler),
+ divisor_is_not_minus_one(assembler);
+ assembler->Branch(assembler->Word32Equal(untagged_divisor,
+ assembler->Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+ assembler->Bind(&divisor_is_minus_one);
+ {
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ untagged_dividend,
+ assembler->Int32Constant(
+ kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ &bailout);
+ assembler->Goto(&divisor_is_not_minus_one);
+ }
+ assembler->Bind(&divisor_is_not_minus_one);
+
+ // TODO(epertoso): consider adding a machine instruction that returns
+ // both the result and the remainder.
+ Node* untagged_result =
+ assembler->Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated =
+ assembler->IntPtrMul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ assembler->GotoIf(
+ assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
+ var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->Goto(&end);
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ assembler->Bind(&bailout);
+ {
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+ // Convert {dividend} to a double and divide it with the value of
+ // {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ Label dividend_is_number(assembler),
+ dividend_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+ &dividend_is_number, &dividend_is_not_number);
+
+ assembler->Bind(&dividend_is_number);
+ {
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and use it for a floating point
+ // division.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and divide them.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_number);
+ {
+ // Convert {dividend} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdiv);
+ {
+ Node* value = assembler->Float64Div(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+ assembler->Bind(&end);
+ return var_result.value();
+}
+
+// static
+compiler::Node* DivideWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* dividend,
+ compiler::Node* divisor, compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector, compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point division.
+ Label do_fdiv(assembler), end(assembler), call_divide_stub(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ Label bailout(assembler);
+
+ // Do floating point division if {divisor} is zero.
+ assembler->GotoIf(
+ assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+
+      // Do floating point division if {dividend} is zero and {divisor} is
+      // negative.
+ Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+ assembler->Branch(
+ assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ &dividend_is_zero, &dividend_is_not_zero);
+
+ assembler->Bind(&dividend_is_zero);
+ {
+ assembler->GotoIf(
+ assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+ assembler->Goto(&dividend_is_not_zero);
+ }
+ assembler->Bind(&dividend_is_not_zero);
+
+ Node* untagged_divisor = assembler->SmiUntag(divisor);
+ Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+ // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
+ // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(assembler),
+ divisor_is_not_minus_one(assembler);
+ assembler->Branch(assembler->Word32Equal(untagged_divisor,
+ assembler->Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+ assembler->Bind(&divisor_is_minus_one);
+ {
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ untagged_dividend,
+ assembler->Int32Constant(kSmiValueSize == 32 ? kMinInt
+ : (kMinInt >> 1))),
+ &bailout);
+ assembler->Goto(&divisor_is_not_minus_one);
+ }
+ assembler->Bind(&divisor_is_not_minus_one);
+
+ Node* untagged_result =
+ assembler->Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = assembler->IntPtrMul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
+ &bailout);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->Goto(&end);
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ assembler->Bind(&bailout);
+ {
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_divide_stub);
+
+ // Convert {dividend} to a double and divide it with the value of
+ // {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ &call_divide_stub);
+
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and use it for a floating point
+ // division.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_divide_stub);
+
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and divide them.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdiv);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value = assembler->Float64Div(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_divide_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Divide(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label return_result(assembler, &var_result);
+
+ // Shared entry point for floating point modulus.
+ Label do_fmod(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_dividend(assembler, MachineRepresentation::kTagged),
+ var_divisor(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_dividend, &var_divisor};
+ Label loop(assembler, 2, loop_variables);
+ var_dividend.Bind(left);
+ var_divisor.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* dividend = var_dividend.value();
+ Node* divisor = var_divisor.value();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label dividend_is_not_zero(assembler);
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Compute the modulus of two Smis.
+ var_result.Bind(assembler->SmiMod(dividend, divisor));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+          // Convert {dividend} to a double and compute its modulus with the
+          // value of {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ Label dividend_is_number(assembler),
+ dividend_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+ &dividend_is_number, &dividend_is_not_number);
+
+ assembler->Bind(&dividend_is_number);
+ {
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and compute {dividend}'s modulus with
+ // it.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and compute their modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_number);
+ {
+ // Convert {dividend} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fmod);
+ {
+ Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_result);
+ return var_result.value();
+}
+
+// static
+compiler::Node* ModulusWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* dividend,
+ compiler::Node* divisor, compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector, compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry point for floating point modulus.
+ Label do_fmod(assembler), end(assembler), call_modulus_stub(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ var_result.Bind(assembler->SmiMod(dividend, divisor));
+ var_type_feedback.Bind(assembler->Select(
+ assembler->WordIsSmi(var_result.value()),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_modulus_stub);
+
+      // Convert {dividend} to a double and compute its modulus with the
+      // value of {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ &call_modulus_stub);
+
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+      // Convert {divisor} to a double and use it for a floating point
+      // modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_modulus_stub);
+
+      // Both {dividend} and {divisor} are HeapNumbers. Load their values
+      // and compute their modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+ }
+
+ assembler->Bind(&do_fmod);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_modulus_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Modulus(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+// static
+compiler::Node* ShiftLeftStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Shl(lhs_value, shift_count);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ return result;
+}
+
+// static
+compiler::Node* ShiftRightStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Sar(lhs_value, shift_count);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ return result;
+}
+
+// static
+compiler::Node* ShiftRightLogicalStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Shr(lhs_value, shift_count);
+ Node* result = assembler->ChangeUint32ToTagged(value);
+ return result;
+}
+
+// static
+compiler::Node* BitwiseAndStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32And(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
}
-void BitwiseOrStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseOrStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
using compiler::Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32Or(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
}
-void BitwiseXorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseXorStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
using compiler::Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32Xor(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
+}
+
+// static
+compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point increment.
+ Label do_finc(assembler), end(assembler);
+ Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ value_var.Bind(value);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNone));
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->SmiAddWithOverflow(value, one);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_finc_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_finc);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(value_map, number_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_finc);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+
+ assembler->Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* finc_result = assembler->Float64Add(finc_value, one);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->ChangeFloat64ToTagged(finc_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return result_var.value();
+}
+
+// static
+compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point decrement.
+ Label do_fdec(assembler), end(assembler);
+ Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNone));
+ value_var.Bind(value);
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi subtraction first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->SmiSubWithOverflow(value, one);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fdec_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_fdec);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(value_map, number_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_fdec);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdec);
+ {
+ Node* fdec_value = var_fdec_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->ChangeFloat64ToTagged(fdec_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return result_var.value();
+}
+
+// ES6 section 7.1.13 ToObject (argument)
+void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
+ if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
+ if_wrapjsvalue(assembler);
+
+ Node* object = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Variable constructor_function_index_var(assembler,
+ MachineRepresentation::kWord32);
+
+ assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
+
+ assembler->Bind(&if_notsmi);
+ Node* map = assembler->LoadMap(object);
+
+ assembler->GotoIf(
+ assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
+ &if_number);
+
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(
+ assembler->Int32GreaterThanOrEqual(
+ instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &if_jsreceiver);
+
+ Node* constructor_function_index = assembler->LoadObjectField(
+ map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ MachineType::Uint8());
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ constructor_function_index,
+ assembler->Int32Constant(Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
+ constructor_function_index_var.Bind(constructor_function_index);
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_number);
+ constructor_function_index_var.Bind(
+ assembler->Int32Constant(Context::NUMBER_FUNCTION_INDEX));
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_wrapjsvalue);
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* constructor = assembler->LoadFixedArrayElement(
+ native_context, constructor_function_index_var.value());
+ Node* initial_map = assembler->LoadObjectField(
+ constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* js_value = assembler->Allocate(JSValue::kSize);
+ assembler->StoreMapNoWriteBarrier(js_value, initial_map);
+ assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
+ assembler->Return(js_value);
+
+ assembler->Bind(&if_noconstructor);
+ assembler->TailCallRuntime(
+ Runtime::kThrowUndefinedOrNullToObject, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ "ToObject", TENURED)));
+
+ assembler->Bind(&if_jsreceiver);
+ assembler->Return(object);
+}
+
+// static
+// ES6 section 12.5.5 typeof operator
+compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+
+ Label return_number(assembler, Label::kDeferred), if_oddball(assembler),
+ return_function(assembler), return_undefined(assembler),
+ return_object(assembler), return_string(assembler),
+ return_result(assembler);
+
+ assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+
+ Node* map = assembler->LoadMap(value);
+
+ assembler->GotoIf(
+ assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
+ &return_number);
+
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+
+ assembler->GotoIf(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
+ &if_oddball);
+
+ Node* callable_or_undetectable_mask =
+ assembler->Word32And(assembler->LoadMapBitField(map),
+ assembler->Int32Constant(1 << Map::kIsCallable |
+ 1 << Map::kIsUndetectable));
+
+ assembler->GotoIf(
+ assembler->Word32Equal(callable_or_undetectable_mask,
+ assembler->Int32Constant(1 << Map::kIsCallable)),
+ &return_function);
+
+ assembler->GotoUnless(assembler->Word32Equal(callable_or_undetectable_mask,
+ assembler->Int32Constant(0)),
+ &return_undefined);
+
+ assembler->GotoIf(
+ assembler->Int32GreaterThanOrEqual(
+ instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &return_object);
+
+ assembler->GotoIf(
+ assembler->Int32LessThan(instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &return_string);
+
+#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
+ Label return_##type(assembler); \
+ Node* type##_map = \
+ assembler->HeapConstant(assembler->factory()->type##_map()); \
+ assembler->GotoIf(assembler->WordEqual(map, type##_map), &return_##type);
+ SIMD128_TYPES(SIMD128_BRANCH)
+#undef SIMD128_BRANCH
+
+ assembler->Assert(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(SYMBOL_TYPE)));
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->symbol_string()));
+ assembler->Goto(&return_result);
+
+ assembler->Bind(&return_number);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->number_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&if_oddball);
+ {
+ Node* type = assembler->LoadObjectField(value, Oddball::kTypeOfOffset);
+ result_var.Bind(type);
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_function);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->function_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_undefined);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->undefined_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_object);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->object_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_string);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->string_string()));
+ assembler->Goto(&return_result);
+ }
+
+#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
+ assembler->Bind(&return_##type); \
+ { \
+ result_var.Bind(assembler->HeapConstant( \
+ assembler->isolate()->factory()->type##_string())); \
+ assembler->Goto(&return_result); \
+ }
+ SIMD128_TYPES(SIMD128_BIND_RETURN)
+#undef SIMD128_BIND_RETURN
+
+ assembler->Bind(&return_result);
+ return result_var.value();
+}
+
+// static
+compiler::Node* InstanceOfStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* object,
+ compiler::Node* callable,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label return_runtime(assembler, Label::kDeferred), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
+
+ // Check if no one installed @@hasInstance somewhere.
+ assembler->GotoUnless(
+ assembler->WordEqual(
+ assembler->LoadObjectField(
+ assembler->LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+ PropertyCell::kValueOffset),
+ assembler->SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
+ &return_runtime);
+
+ // Check if {callable} is a valid receiver.
+ assembler->GotoIf(assembler->WordIsSmi(callable), &return_runtime);
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ assembler->LoadMapBitField(assembler->LoadMap(callable)),
+ assembler->Int32Constant(1 << Map::kIsCallable)),
+ assembler->Int32Constant(0)),
+ &return_runtime);
+
+ // Use the inline OrdinaryHasInstance directly.
+ result.Bind(assembler->OrdinaryHasInstance(context, callable, object));
+ assembler->Goto(&end);
+
+ // TODO(bmeurer): Use GetPropertyStub here once available.
+ assembler->Bind(&return_runtime);
+ {
+ result.Bind(assembler->CallRuntime(Runtime::kInstanceOf, context, object,
+ callable));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
namespace {
@@ -1082,15 +2838,15 @@ enum RelationalComparisonMode {
kGreaterThanOrEqual
};
-void GenerateAbstractRelationalComparison(
- compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
- typedef compiler::CodeStubAssembler::Label Label;
+compiler::Node* GenerateAbstractRelationalComparison(
+ CodeStubAssembler* assembler, RelationalComparisonMode mode,
+ compiler::Node* lhs, compiler::Node* rhs, compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
- Label return_true(assembler), return_false(assembler);
+ Label return_true(assembler), return_false(assembler), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Shared entry for floating point comparison.
Label do_fcmp(assembler);
@@ -1103,14 +2859,14 @@ void GenerateAbstractRelationalComparison(
var_rhs(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
@@ -1283,7 +3039,7 @@ void GenerateAbstractRelationalComparison(
Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler),
+ Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler, Label::kDeferred);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
@@ -1295,24 +3051,29 @@ void GenerateAbstractRelationalComparison(
// Both {lhs} and {rhs} are strings.
switch (mode) {
case kLessThan:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringLessThan(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kLessThanOrEqual:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringLessThanOrEqual(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kGreaterThan:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringGreaterThan(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kGreaterThanOrEqual:
- assembler->TailCallStub(CodeFactory::StringGreaterThanOrEqual(
+ result.Bind(
+ assembler->CallStub(CodeFactory::StringGreaterThanOrEqual(
assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
}
}
@@ -1335,9 +3096,9 @@ void GenerateAbstractRelationalComparison(
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing Number hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_rhs.Bind(assembler->CallRuntime(
- Runtime::kToPrimitive_Number, context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate(), ToPrimitiveHint::kNumber);
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1369,9 +3130,9 @@ void GenerateAbstractRelationalComparison(
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing Number hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive_Number,
- context, lhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate(), ToPrimitiveHint::kNumber);
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -1417,25 +3178,33 @@ void GenerateAbstractRelationalComparison(
}
assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ {
+ result.Bind(assembler->BooleanConstant(true));
+ assembler->Goto(&end);
+ }
assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ {
+ result.Bind(assembler->BooleanConstant(false));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
enum ResultMode { kDontNegateResult, kNegateResult };
-void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::CodeStubAssembler::Label* if_equal,
- compiler::CodeStubAssembler::Label* if_notequal) {
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+ CodeStubAssembler::Label* if_equal,
+ CodeStubAssembler::Label* if_notequal) {
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
// TODO(bmeurer): This seems to violate the SIMD.js specification, but it
// seems to be what is tested in the current SIMD.js testsuite.
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
// Check if {value} is a Smi or a HeapObject.
@@ -1472,97 +3241,28 @@ void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
}
void GenerateEqual_Simd128Value_HeapObject(
- compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
- compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
- compiler::CodeStubAssembler::Label* if_equal,
- compiler::CodeStubAssembler::Label* if_notequal) {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- // Check if {lhs} and {rhs} have the same map.
- Label if_mapsame(assembler), if_mapnotsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs_map, rhs_map), &if_mapsame,
- &if_mapnotsame);
-
- assembler->Bind(&if_mapsame);
- {
- // Both {lhs} and {rhs} are Simd128Values with the same map, need special
- // handling for Float32x4 because of NaN comparisons.
- Label if_float32x4(assembler), if_notfloat32x4(assembler);
- Node* float32x4_map =
- assembler->HeapConstant(assembler->factory()->float32x4_map());
- assembler->Branch(assembler->WordEqual(lhs_map, float32x4_map),
- &if_float32x4, &if_notfloat32x4);
-
- assembler->Bind(&if_float32x4);
- {
- // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
- // using a floating point comparison.
- for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
- offset < Float32x4::kSize - kHeapObjectTag;
- offset += sizeof(float)) {
- // Load the floating point values for {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Float32(), lhs,
- assembler->IntPtrConstant(offset));
- Node* rhs_value = assembler->Load(MachineType::Float32(), rhs,
- assembler->IntPtrConstant(offset));
-
- // Perform a floating point comparison.
- Label if_valueequal(assembler), if_valuenotequal(assembler);
- assembler->Branch(assembler->Float32Equal(lhs_value, rhs_value),
- &if_valueequal, &if_valuenotequal);
- assembler->Bind(&if_valuenotequal);
- assembler->Goto(if_notequal);
- assembler->Bind(&if_valueequal);
- }
-
- // All 4 lanes match, {lhs} and {rhs} considered equal.
- assembler->Goto(if_equal);
- }
-
- assembler->Bind(&if_notfloat32x4);
- {
- // For other Simd128Values we just perform a bitwise comparison.
- for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
- offset < Simd128Value::kSize - kHeapObjectTag;
- offset += kPointerSize) {
- // Load the word values for {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Pointer(), lhs,
- assembler->IntPtrConstant(offset));
- Node* rhs_value = assembler->Load(MachineType::Pointer(), rhs,
- assembler->IntPtrConstant(offset));
-
- // Perform a bitwise word-comparison.
- Label if_valueequal(assembler), if_valuenotequal(assembler);
- assembler->Branch(assembler->WordEqual(lhs_value, rhs_value),
- &if_valueequal, &if_valuenotequal);
- assembler->Bind(&if_valuenotequal);
- assembler->Goto(if_notequal);
- assembler->Bind(&if_valueequal);
- }
-
- // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
- assembler->Goto(if_equal);
- }
- }
-
- assembler->Bind(&if_mapnotsame);
- assembler->Goto(if_notequal);
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+ compiler::Node* rhs, compiler::Node* rhs_map,
+ CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+ assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
+ if_notequal);
}
// ES6 section 7.2.12 Abstract Equality Comparison
-void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
+compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
// This is a slightly optimized version of Object::Equals represented as
// scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
// change something functionality wise in here, remember to update the
// Object::Equals method as well.
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(assembler), if_notequal(assembler),
+ do_rhsstringtonumber(assembler, Label::kDeferred), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Shared entry for floating point comparison.
Label do_fcmp(assembler);
@@ -1575,14 +3275,14 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
var_rhs(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
// Check if {lhs} and {rhs} refer to the same object.
Label if_same(assembler), if_notsame(assembler);
@@ -1610,6 +3310,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
&if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
+ // We have already checked for {lhs} and {rhs} being the same value, so
+ // if both are Smis when we get here they must not be equal.
assembler->Goto(&if_notequal);
assembler->Bind(&if_rhsisnotsmi);
@@ -1619,8 +3321,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if {rhs} is a HeapNumber.
Node* number_map = assembler->HeapNumberMapConstant();
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
assembler->Branch(assembler->WordEqual(rhs_map, number_map),
&if_rhsisnumber, &if_rhsisnotnumber);
@@ -1640,7 +3341,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if the {rhs} is a String.
Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -1648,19 +3349,17 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisstring);
{
- // Convert the {rhs} to a Number.
- Callable callable =
- CodeFactory::StringToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ // The {rhs} is a String and the {lhs} is a Smi; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ assembler->Goto(&do_rhsstringtonumber);
}
assembler->Bind(&if_rhsisnotstring);
{
// Check if the {rhs} is a Boolean.
Node* boolean_map = assembler->BooleanMapConstant();
- Label if_rhsisboolean(assembler, Label::kDeferred),
- if_rhsisnotboolean(assembler, Label::kDeferred);
+ Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
&if_rhsisboolean, &if_rhsisnotboolean);
@@ -1677,7 +3376,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if the {rhs} is a Receiver.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
+ if_rhsisnotreceiver(assembler);
assembler->Branch(
assembler->Int32LessThanOrEqual(
assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
@@ -1687,9 +3386,9 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first (passing no hint).
- // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1761,8 +3460,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsisstring);
{
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler),
- if_rhsisnotstring(assembler, Label::kDeferred);
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -1776,7 +3475,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
(mode == kDontNegateResult)
? CodeFactory::StringEqual(assembler->isolate())
: CodeFactory::StringNotEqual(assembler->isolate());
- assembler->TailCallStub(callable, context, lhs, rhs);
+ result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotstring);
@@ -1794,8 +3494,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsisnumber);
{
// Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
assembler->Branch(
assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
&if_rhsisnumber, &if_rhsisnotnumber);
@@ -1825,16 +3524,13 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// The {rhs} is a String and the {lhs} is a HeapNumber; we need
// to convert the {rhs} to a Number and compare the output to
// the Number on the {lhs}.
- Callable callable =
- CodeFactory::StringToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ assembler->Goto(&do_rhsstringtonumber);
}
assembler->Bind(&if_rhsisnotstring);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
+ Label if_rhsisreceiver(assembler),
if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
@@ -1926,8 +3622,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsissymbol);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler);
+ Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
assembler->Int32LessThanOrEqual(
@@ -1974,8 +3669,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisnotsimd128value);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler);
+ Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
assembler->Int32LessThanOrEqual(
@@ -2050,9 +3744,9 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
{
// The {rhs} is some Primitive different from Null and
// Undefined, need to convert {lhs} to Primitive first.
- // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
}
@@ -2060,6 +3754,13 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
}
}
}
+
+ assembler->Bind(&do_rhsstringtonumber);
+ {
+ Callable callable = CodeFactory::StringToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
}
assembler->Bind(&do_fcmp);
@@ -2073,14 +3774,25 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
}
assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+ assembler->Goto(&end);
+ }
assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
-void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
- ResultMode mode) {
+compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
+ ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -2129,14 +3841,12 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
// }
// }
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
typedef compiler::Node Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
-
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(assembler), if_notequal(assembler), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Check if {lhs} and {rhs} refer to the same object.
Label if_same(assembler), if_notsame(assembler);
@@ -2241,7 +3951,8 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -2253,7 +3964,8 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
(mode == kDontNegateResult)
? CodeFactory::StringEqual(assembler->isolate())
: CodeFactory::StringNotEqual(assembler->isolate());
- assembler->TailCallStub(callable, context, lhs, rhs);
+ result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotstring);
@@ -2330,17 +4042,26 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
}
assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+ assembler->Goto(&end);
+ }
assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
-void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
+void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
RelationalComparisonMode mode) {
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@@ -2386,8 +4107,8 @@ void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
assembler->Bind(&if_bothonebyteseqstrings);
{
// Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
- Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
// Determine the minimum length.
Node* length = assembler->SmiMin(lhs_length, rhs_length);
@@ -2519,8 +4240,7 @@ void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
}
}
-void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
- ResultMode mode) {
+void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -2537,9 +4257,9 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
// }
// return %StringEqual(lhs, rhs);
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@@ -2559,8 +4279,8 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
// The {lhs} and {rhs} don't refer to the exact same String object.
// Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
- Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
// Check if the lengths of {lhs} and {rhs} are equal.
Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
@@ -2698,80 +4418,118 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
} // namespace
-void LessThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kLessThan);
+void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ // For now we only support receiver_is_holder.
+ DCHECK(receiver_is_holder());
+ Node* holder = receiver;
+ Node* map = assembler->LoadMap(receiver);
+ Node* descriptors = assembler->LoadMapDescriptors(map);
+ Node* offset =
+ assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
+ Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
+ assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+ holder, callback);
}
-void LessThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
+// static
+compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kLessThan, lhs, rhs,
+ context);
}
-void GreaterThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kGreaterThan);
+// static
+compiler::Node* LessThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual, lhs,
+ rhs, context);
}
-void GreaterThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
+// static
+compiler::Node* GreaterThanStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kGreaterThan, lhs, rhs,
+ context);
}
-void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
- GenerateEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* GreaterThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual,
+ lhs, rhs, context);
}
-void NotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateEqual(assembler, kNegateResult);
+// static
+compiler::Node* EqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateEqual(assembler, kDontNegateResult, lhs, rhs, context);
+}
+
+// static
+compiler::Node* NotEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateEqual(assembler, kNegateResult, lhs, rhs, context);
}
-void StrictEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateStrictEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* StrictEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateStrictEqual(assembler, kDontNegateResult, lhs, rhs, context);
}
-void StrictNotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateStrictEqual(assembler, kNegateResult);
+// static
+compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
}
-void StringEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kDontNegateResult);
}
-void StringNotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kNegateResult);
}
-void StringLessThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThan);
}
void StringLessThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
}
void StringGreaterThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThan);
}
void StringGreaterThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
}
-void ToLengthStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@@ -2844,150 +4602,10 @@ void ToLengthStub::GenerateAssembly(
}
}
-void ToBooleanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Label Label;
-
- Node* value = assembler->Parameter(0);
- Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
-
- // Check if {value} is a Smi or a HeapObject.
- assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
- &if_valueisnotsmi);
-
- assembler->Bind(&if_valueissmi);
- {
- // The {value} is a Smi, only need to check against zero.
- Label if_valueiszero(assembler), if_valueisnotzero(assembler);
- assembler->Branch(assembler->SmiEqual(value, assembler->SmiConstant(0)),
- &if_valueiszero, &if_valueisnotzero);
-
- assembler->Bind(&if_valueiszero);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotzero);
- assembler->Return(assembler->BooleanConstant(true));
- }
-
- assembler->Bind(&if_valueisnotsmi);
- {
- Label if_valueisstring(assembler), if_valueisheapnumber(assembler),
- if_valueisoddball(assembler), if_valueisother(assembler);
-
- // The {value} is a HeapObject, load its map.
- Node* value_map = assembler->LoadMap(value);
-
- // Load the {value}s instance type.
- Node* value_instance_type = assembler->Load(
- MachineType::Uint8(), value_map,
- assembler->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
-
- // Dispatch based on the instance type; we distinguish all String instance
- // types, the HeapNumber type and the Oddball type.
- size_t const kNumCases = FIRST_NONSTRING_TYPE + 2;
- Label* case_labels[kNumCases];
- int32_t case_values[kNumCases];
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- case_labels[i] = new Label(assembler);
- case_values[i] = i;
- }
- case_labels[FIRST_NONSTRING_TYPE + 0] = &if_valueisheapnumber;
- case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 1] = &if_valueisoddball;
- case_values[FIRST_NONSTRING_TYPE + 1] = ODDBALL_TYPE;
- assembler->Switch(value_instance_type, &if_valueisother, case_values,
- case_labels, arraysize(case_values));
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- assembler->Bind(case_labels[i]);
- assembler->Goto(&if_valueisstring);
- delete case_labels[i];
- }
-
- assembler->Bind(&if_valueisstring);
- {
- // Load the string length field of the {value}.
- Node* value_length =
- assembler->LoadObjectField(value, String::kLengthOffset);
-
- // Check if the {value} is the empty string.
- Label if_valueisempty(assembler), if_valueisnotempty(assembler);
- assembler->Branch(
- assembler->SmiEqual(value_length, assembler->SmiConstant(0)),
- &if_valueisempty, &if_valueisnotempty);
-
- assembler->Bind(&if_valueisempty);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotempty);
- assembler->Return(assembler->BooleanConstant(true));
- }
-
- assembler->Bind(&if_valueisheapnumber);
- {
- Node* value_value = assembler->Load(
- MachineType::Float64(), value,
- assembler->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
-
- Label if_valueispositive(assembler), if_valueisnotpositive(assembler),
- if_valueisnegative(assembler), if_valueisnanorzero(assembler);
- assembler->Branch(assembler->Float64LessThan(
- assembler->Float64Constant(0.0), value_value),
- &if_valueispositive, &if_valueisnotpositive);
-
- assembler->Bind(&if_valueispositive);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&if_valueisnotpositive);
- assembler->Branch(assembler->Float64LessThan(
- value_value, assembler->Float64Constant(0.0)),
- &if_valueisnegative, &if_valueisnanorzero);
-
- assembler->Bind(&if_valueisnegative);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&if_valueisnanorzero);
- assembler->Return(assembler->BooleanConstant(false));
- }
-
- assembler->Bind(&if_valueisoddball);
- {
- // The {value} is an Oddball, and every Oddball knows its boolean value.
- Node* value_toboolean =
- assembler->LoadObjectField(value, Oddball::kToBooleanOffset);
- assembler->Return(value_toboolean);
- }
-
- assembler->Bind(&if_valueisother);
- {
- Node* value_map_bitfield = assembler->Load(
- MachineType::Uint8(), value_map,
- assembler->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
- Node* value_map_undetectable = assembler->Word32And(
- value_map_bitfield,
- assembler->Int32Constant(1 << Map::kIsUndetectable));
-
- // Check if the {value} is undetectable.
- Label if_valueisundetectable(assembler),
- if_valueisnotundetectable(assembler);
- assembler->Branch(assembler->Word32Equal(value_map_undetectable,
- assembler->Int32Constant(0)),
- &if_valueisnotundetectable, &if_valueisundetectable);
-
- assembler->Bind(&if_valueisundetectable);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotundetectable);
- assembler->Return(assembler->BooleanConstant(true));
- }
- }
-}
-
-void ToIntegerStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@@ -3046,25 +4664,27 @@ void ToIntegerStub::GenerateAssembly(
}
void StoreInterceptorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* receiver = assembler->Parameter(0);
- Node* name = assembler->Parameter(1);
- Node* value = assembler->Parameter(2);
- Node* context = assembler->Parameter(3);
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* context = assembler->Parameter(Descriptor::kContext);
assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
receiver, name, value);
}
void LoadIndexedInterceptorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Label Label;
- Node* receiver = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* slot = assembler->Parameter(2);
- Node* vector = assembler->Parameter(3);
- Node* context = assembler->Parameter(4);
+ typedef CodeStubAssembler::Label Label;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
@@ -3078,6 +4698,133 @@ void LoadIndexedInterceptorStub::GenerateAssembly(
slot, vector);
}
+// static
+bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
+ // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
+ // support copy-on-write (COW) elements for now.
+ // TODO(mvstanton): make object literals support COW elements.
+ return expr->fast_elements() && expr->has_shallow_properties() &&
+ expr->properties_count() <= kMaximumClonedProperties;
+}
+
+// static
+int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
+ // This heuristic of setting empty literals to have
+ // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
+ // runtime.
+ // TODO(verwaest): Unify this with the heuristic in the runtime.
+ return literal_length == 0
+ ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+ : literal_length;
+}
+
+// static
+compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
+ CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
+ compiler::Node* closure, compiler::Node* literals_index,
+ compiler::Node* properties_count) {
+ typedef compiler::Node Node;
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Node* undefined = assembler->UndefinedConstant();
+ Node* literals_array =
+ assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* allocation_site = assembler->LoadFixedArrayElement(
+ literals_array, literals_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
+ call_runtime);
+
+ // Calculate the object and allocation size based on the properties count.
+ Node* object_size = assembler->IntPtrAdd(
+ assembler->WordShl(properties_count, kPointerSizeLog2),
+ assembler->IntPtrConstant(JSObject::kHeaderSize));
+ Node* allocation_size = object_size;
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_size = assembler->IntPtrAdd(
+ object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
+ }
+ Node* boilerplate = assembler->LoadObjectField(
+ allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate_map = assembler->LoadMap(boilerplate);
+ Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
+ Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
+ assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
+ call_runtime);
+
+ Node* copy = assembler->Allocate(allocation_size);
+
+ // Copy boilerplate elements.
+ Variable offset(assembler, MachineType::PointerRepresentation());
+ offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
+ Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
+ Label loop_body(assembler, &offset), loop_check(assembler, &offset);
+ // We should always have an object size greater than zero.
+ assembler->Goto(&loop_body);
+ assembler->Bind(&loop_body);
+ {
+ // The Allocate above guarantees that the copy lies in new space. This
+ // allows us to skip write barriers. This is necessary since we may also be
+ // copying unboxed doubles.
+ Node* field =
+ assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
+ assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+ offset.value(), field);
+ assembler->Goto(&loop_check);
+ }
+ assembler->Bind(&loop_check);
+ {
+ offset.Bind(assembler->IntPtrAdd(offset.value(),
+ assembler->IntPtrConstant(kPointerSize)));
+ assembler->GotoUnless(
+ assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
+ &loop_body);
+ }
+
+ if (FLAG_allocation_site_pretenuring) {
+ Node* memento = assembler->InnerAllocate(copy, object_size);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ memento, HeapObject::kMapOffset,
+ assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+ Node* memento_create_count = assembler->LoadObjectField(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset);
+ memento_create_count = assembler->SmiAdd(
+ memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset,
+ memento_create_count);
+ }
+
+ // TODO(verwaest): Allocate and fill in double boxes.
+ return copy;
+}
+
+void FastCloneShallowObjectStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ Label call_runtime(assembler);
+ Node* closure = assembler->Parameter(0);
+ Node* literals_index = assembler->Parameter(1);
+
+ Node* properties_count =
+ assembler->IntPtrConstant(PropertiesCount(this->length()));
+ Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
+ literals_index, properties_count);
+ assembler->Return(copy);
+
+ assembler->Bind(&call_runtime);
+ Node* constant_properties = assembler->Parameter(2);
+ Node* flags = assembler->Parameter(3);
+ Node* context = assembler->Parameter(4);
+ assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+ literals_index, constant_properties, flags);
+}
+
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
@@ -3156,7 +4903,7 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
return LoadWithVectorDescriptor(isolate());
} else {
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return VectorStoreICDescriptor(isolate());
+ return StoreWithVectorDescriptor(isolate());
}
}
@@ -3174,45 +4921,15 @@ void ElementsTransitionAndStoreStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_ElementsTransitionAndStoreIC_Miss));
}
-
-void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kToObject)->entry);
-}
-
-
-CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
- const {
- return VectorStoreTransitionDescriptor(isolate());
-}
-
-
-CallInterfaceDescriptor
-ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
- return VectorStoreTransitionDescriptor(isolate());
-}
-
-
-void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
+void StoreTransitionStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_TransitionStoreIC_MissFromStubFailure));
}
-
-void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
-void TypeofStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
-
-
void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kNumberToString)->entry);
-}
-
-
-void FastCloneRegExpStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- FastCloneRegExpDescriptor call_descriptor(isolate());
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kCreateRegExpLiteral)->entry);
+ descriptor->SetMissHandler(Runtime::kNumberToString);
}
@@ -3221,27 +4938,14 @@ void FastCloneShallowArrayStub::InitializeDescriptor(
FastCloneShallowArrayDescriptor call_descriptor(isolate());
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
+ descriptor->SetMissHandler(Runtime::kCreateArrayLiteralStubBailout);
}
-
-void FastCloneShallowObjectStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- FastCloneShallowObjectDescriptor call_descriptor(isolate());
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
-void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+ descriptor->SetMissHandler(Runtime::kRegExpConstructResult);
}
@@ -3259,11 +4963,6 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
-void AllocateMutableHeapNumberStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize();
-}
-
#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Stub::InitializeDescriptor( \
CodeStubDescriptor* descriptor) { \
@@ -3273,22 +4972,15 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
-void AllocateInNewSpaceStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize();
-}
-
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
- descriptor->SetMissHandler(ExternalReference(
- Runtime::FunctionForId(Runtime::kToBooleanIC_Miss), isolate()));
+ descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
}
void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
- descriptor->SetMissHandler(ExternalReference(
- Runtime::FunctionForId(Runtime::kBinaryOpIC_Miss), isolate()));
+ descriptor->SetMissHandler(Runtime::kBinaryOpIC_Miss);
}
@@ -3301,21 +4993,461 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+ descriptor->SetMissHandler(Runtime::kStringAdd);
}
+namespace {
-void GrowArrayElementsStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kGrowArrayElements)->entry);
+compiler::Node* GenerateHasProperty(
+ CodeStubAssembler* assembler, compiler::Node* object, compiler::Node* key,
+ compiler::Node* context, Runtime::FunctionId fallback_runtime_function_id) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(assembler, Label::kDeferred), return_true(assembler),
+ return_false(assembler), end(assembler);
+
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name,
+ Label* next_holder, Label* if_bailout) {
+ assembler->TryHasOwnProperty(holder, holder_map, holder_instance_type,
+ unique_name, &return_true, next_holder,
+ if_bailout);
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index,
+ Label* next_holder, Label* if_bailout) {
+ assembler->TryLookupElement(holder, holder_map, holder_instance_type,
+ index, &return_true, next_holder,
+ if_bailout);
+ };
+
+ assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_false,
+ &call_runtime);
+
+ Variable result(assembler, MachineRepresentation::kTagged);
+ assembler->Bind(&return_true);
+ {
+ result.Bind(assembler->BooleanConstant(true));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&return_false);
+ {
+ result.Bind(assembler->BooleanConstant(false));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ result.Bind(assembler->CallRuntime(fallback_runtime_function_id, context,
+ object, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
+} // namespace
-void TypeofStub::GenerateAheadOfTime(Isolate* isolate) {
- TypeofStub stub(isolate);
- stub.GetCode();
+// static
+compiler::Node* HasPropertyStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ return GenerateHasProperty(assembler, object, key, context,
+ Runtime::kHasProperty);
+}
+
+// static
+compiler::Node* ForInFilterStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label return_undefined(assembler, Label::kDeferred),
+ return_to_name(assembler), end(assembler);
+
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+
+ Node* has_property = GenerateHasProperty(assembler, object, key, context,
+ Runtime::kForInHasProperty);
+
+ assembler->Branch(
+ assembler->WordEqual(has_property, assembler->BooleanConstant(true)),
+ &return_to_name, &return_undefined);
+
+ assembler->Bind(&return_to_name);
+ {
+ // TODO(cbruni): inline ToName here.
+ Callable callable = CodeFactory::ToName(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&return_undefined);
+ {
+ var_result.Bind(assembler->UndefinedConstant());
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return var_result.value();
+}
+
+void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
+ end(assembler);
+
+ Node* object = assembler->Parameter(0);
+ Node* key = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [assembler, context, &var_result, &end](
+ Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name, Label* next_holder,
+ Label* if_bailout) {
+ Variable var_value(assembler, MachineRepresentation::kTagged);
+ Label if_found(assembler);
+ assembler->TryGetOwnProperty(
+ context, receiver, holder, holder_map, holder_instance_type,
+ unique_name, &if_found, &var_value, next_holder, if_bailout);
+ assembler->Bind(&if_found);
+ {
+ var_result.Bind(var_value.value());
+ assembler->Goto(&end);
+ }
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [assembler, context, &var_result, &end](
+ Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index, Label* next_holder,
+ Label* if_bailout) {
+ // Not supported yet.
+ assembler->Use(next_holder);
+ assembler->Goto(if_bailout);
+ };
+
+ assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder,
+ &return_undefined, &call_runtime);
+
+ assembler->Bind(&return_undefined);
+ {
+ var_result.Bind(assembler->UndefinedConstant());
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ var_result.Bind(
+ assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->Return(var_result.value());
+}
+
+// static
+compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* shared_info,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Isolate* isolate = assembler->isolate();
+ Factory* factory = assembler->isolate()->factory();
+ assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
+
+ // Create a new closure from the given function info in new space
+ Node* result = assembler->Allocate(JSFunction::kSize);
+
+ // Calculate the index of the map we should install on the function based on
+ // the FunctionKind and LanguageMode of the function.
+ // Note: Must be kept in sync with Context::FunctionMapIndex
+ Node* compiler_hints = assembler->LoadObjectField(
+ shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* is_strict = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
+
+ Label if_normal(assembler), if_generator(assembler), if_async(assembler),
+ if_class_constructor(assembler), if_function_without_prototype(assembler),
+ load_map(assembler);
+ Variable map_index(assembler, MachineRepresentation::kTagged);
+
+ Node* is_not_normal = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(SharedFunctionInfo::kFunctionKindMaskBits));
+ assembler->GotoUnless(is_not_normal, &if_normal);
+
+ Node* is_generator = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kIsGeneratorBit));
+ assembler->GotoIf(is_generator, &if_generator);
+
+ Node* is_async = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kIsAsyncFunctionBit));
+ assembler->GotoIf(is_async, &if_async);
+
+ Node* is_class_constructor = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(SharedFunctionInfo::kClassConstructorBits));
+ assembler->GotoIf(is_class_constructor, &if_class_constructor);
+
+ if (FLAG_debug_code) {
+ // Function must be a function without a prototype.
+ assembler->Assert(assembler->Word32And(
+ compiler_hints, assembler->Int32Constant(
+ SharedFunctionInfo::kAccessorFunctionBits |
+ (1 << SharedFunctionInfo::kIsArrowBit) |
+ (1 << SharedFunctionInfo::kIsConciseMethodBit))));
+ }
+ assembler->Goto(&if_function_without_prototype);
+
+ assembler->Bind(&if_normal);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict, assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_generator);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict,
+ assembler->Int32Constant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(
+ Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_async);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict,
+ assembler->Int32Constant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_class_constructor);
+ {
+ map_index.Bind(
+ assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_function_without_prototype);
+ {
+ map_index.Bind(assembler->Int32Constant(
+ Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&load_map);
+
+ // Get the function map in the current native context and set that
+ // as the map of the allocated object.
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* map_slot_value =
+ assembler->LoadFixedArrayElement(native_context, map_index.value());
+ assembler->StoreMapNoWriteBarrier(result, map_slot_value);
+
+ // Initialize the rest of the function.
+ Node* empty_fixed_array =
+ assembler->HeapConstant(factory->empty_fixed_array());
+ Node* empty_literals_array =
+ assembler->HeapConstant(factory->empty_literals_array());
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ empty_fixed_array);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
+ empty_fixed_array);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
+ empty_literals_array);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kPrototypeOrInitialMapOffset,
+ assembler->TheHoleConstant());
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kSharedFunctionInfoOffset, shared_info);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
+ context);
+ Handle<Code> lazy_builtin_handle(
+ assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
+ Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
+ Node* lazy_builtin_entry = assembler->IntPtrAdd(
+ lazy_builtin,
+ assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
+ assembler->StoreObjectFieldNoWriteBarrier(result,
+ JSFunction::kNextFunctionLinkOffset,
+ assembler->UndefinedConstant());
+
+ return result;
}
+void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ assembler->Return(
+ Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
+}
+
+// static
+compiler::Node* FastNewFunctionContextStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* function,
+ compiler::Node* slots, compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* min_context_slots =
+ assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
+ Node* length = assembler->Int32Add(slots, min_context_slots);
+ Node* size = assembler->Int32Add(
+ assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
+ assembler->Int32Constant(FixedArray::kHeaderSize));
+
+  // Create a new function context in new space.
+ Node* function_context = assembler->Allocate(size);
+
+ Isolate* isolate = assembler->isolate();
+ assembler->StoreMapNoWriteBarrier(
+ function_context,
+ assembler->HeapConstant(isolate->factory()->function_context_map()));
+ assembler->StoreObjectFieldNoWriteBarrier(function_context,
+ Context::kLengthOffset,
+ assembler->SmiFromWord32(length));
+
+ // Set up the fixed slots.
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
+ function, SKIP_WRITE_BARRIER);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
+ context, SKIP_WRITE_BARRIER);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
+ assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);
+
+ // Copy the native context from the previous context.
+ Node* native_context = assembler->LoadNativeContext(context);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
+ native_context, SKIP_WRITE_BARRIER);
+
+ // Initialize the rest of the slots to undefined.
+ Node* undefined = assembler->UndefinedConstant();
+ Variable var_slot_index(assembler, MachineRepresentation::kWord32);
+ var_slot_index.Bind(min_context_slots);
+ Label loop(assembler, &var_slot_index), after_loop(assembler);
+ assembler->Goto(&loop);
+
+ assembler->Bind(&loop);
+ {
+ Node* slot_index = var_slot_index.value();
+ assembler->GotoUnless(assembler->Int32LessThan(slot_index, length),
+ &after_loop);
+ assembler->StoreFixedArrayElement(function_context, slot_index, undefined,
+ SKIP_WRITE_BARRIER);
+ Node* one = assembler->Int32Constant(1);
+ Node* next_index = assembler->Int32Add(slot_index, one);
+ var_slot_index.Bind(next_index);
+ assembler->Goto(&loop);
+ }
+ assembler->Bind(&after_loop);
+
+ return function_context;
+}
+
+void FastNewFunctionContextStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(Generate(assembler, function, slots, context));
+}
+
+// static
+compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* closure,
+ compiler::Node* literal_index,
+ compiler::Node* pattern,
+ compiler::Node* flags,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ Label call_runtime(assembler, Label::kDeferred), end(assembler);
+
+ Variable result(assembler, MachineRepresentation::kTagged);
+
+ Node* undefined = assembler->UndefinedConstant();
+ Node* literals_array =
+ assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* boilerplate = assembler->LoadFixedArrayElement(
+ literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->GotoIf(assembler->WordEqual(boilerplate, undefined),
+ &call_runtime);
+
+ {
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Node* copy = assembler->Allocate(size);
+ for (int offset = 0; offset < size; offset += kPointerSize) {
+ Node* value = assembler->LoadObjectField(boilerplate, offset);
+ assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
+ }
+ result.Bind(copy);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ closure, literal_index, pattern, flags));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
+}
+
+void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* closure = assembler->Parameter(Descriptor::kClosure);
+ Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
+ Node* pattern = assembler->Parameter(Descriptor::kPattern);
+ Node* flags = assembler->Parameter(Descriptor::kFlags);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(
+ Generate(assembler, closure, literal_index, pattern, flags, context));
+}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub(isolate);
@@ -3337,6 +5469,7 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
// static
void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
+ if (FLAG_minimal) return;
StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS, STANDARD_STORE)
.GetCode();
StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS,
@@ -3349,7 +5482,6 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
}
-
void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
os << "ArrayConstructorStub";
switch (argument_count()) {
@@ -3370,20 +5502,10 @@ void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
}
-std::ostream& ArrayConstructorStubBase::BasePrintName(
- std::ostream& os, // NOLINT
- const char* name) const {
- os << name << "_" << ElementsKindToString(elements_kind());
- if (override_mode() == DISABLE_ALLOCATION_SITES) {
- os << "_DISABLE_ALLOCATION_SITES";
- }
- return os;
-}
-
bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
Types new_types = types();
Types old_types = new_types;
- bool to_boolean_value = new_types.UpdateStatus(object);
+ bool to_boolean_value = new_types.UpdateStatus(isolate(), object);
TraceTransition(old_types, new_types);
set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
return to_boolean_value;
@@ -3409,14 +5531,15 @@ std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
return os << ")";
}
-bool ToBooleanICStub::Types::UpdateStatus(Handle<Object> object) {
- if (object->IsUndefined()) {
+bool ToBooleanICStub::Types::UpdateStatus(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsUndefined(isolate)) {
Add(UNDEFINED);
return false;
} else if (object->IsBoolean()) {
Add(BOOLEAN);
- return object->IsTrue();
- } else if (object->IsNull()) {
+ return object->IsTrue(isolate);
+ } else if (object->IsNull(isolate)) {
Add(NULL_TYPE);
return false;
} else if (object->IsSmi()) {
@@ -3472,14 +5595,231 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
entry_hook(function, stack_pointer);
}
+void CreateAllocationSiteStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* size = assembler->IntPtrConstant(AllocationSite::kSize);
+ Node* site = assembler->Allocate(size, CodeStubAssembler::kPretenured);
+
+ // Store the map
+ assembler->StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
+ Heap::kAllocationSiteMapRootIndex);
+
+ Node* kind =
+ assembler->SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kTransitionInfoOffset, kind);
+
+ // Unlike literals, constructed arrays don't have nested sites
+ Node* zero = assembler->IntPtrConstant(0);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kNestedSiteOffset, zero);
+
+ // Pretenuring calculation field.
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kPretenureDataOffset, zero);
+
+ // Pretenuring memento creation count field.
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kPretenureCreateCountOffset, zero);
+
+ // Store an empty fixed array for the code dependency.
+ assembler->StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // Link the object to the allocation site list
+ Node* site_list = assembler->ExternalConstant(
+ ExternalReference::allocation_sites_list_address(isolate()));
+ Node* next_site = assembler->LoadBufferObject(site_list, 0);
+
+ // TODO(mvstanton): This is a store to a weak pointer, which we may want to
+ // mark as such in order to skip the write barrier, once we have a unified
+ // system for weakness. For now we decided to keep it like this because having
+ // an initial write barrier backed store makes this pointer strong until the
+ // next GC, and allocation sites are designed to survive several GCs anyway.
+ assembler->StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list,
+ site);
+
+ Node* feedback_vector = assembler->Parameter(Descriptor::kVector);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+
+ assembler->StoreFixedArrayElement(feedback_vector, slot, site,
+ UPDATE_WRITE_BARRIER,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ assembler->Return(site);
+}
+
+void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ assembler->Return(assembler->CreateWeakCellInFeedbackVector(
+ assembler->Parameter(Descriptor::kVector),
+ assembler->Parameter(Descriptor::kSlot),
+ assembler->Parameter(Descriptor::kValue)));
+}
+
+void ArrayNoArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* native_context = assembler->LoadObjectField(
+ assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
+ bool track_allocation_site =
+ AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
+ override_mode() != DISABLE_ALLOCATION_SITES;
+ Node* allocation_site =
+ track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
+ : nullptr;
+ Node* array_map =
+ assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ Node* array = assembler->AllocateJSArray(
+ elements_kind(), array_map,
+ assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler->IntPtrConstant(0), allocation_site);
+ assembler->Return(array);
+}
+
+void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* array_map =
+ assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array = assembler->AllocateJSArray(
+ elements_kind(), array_map,
+ assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler->IntPtrConstant(0), nullptr);
+ assembler->Return(array);
+}
+
+namespace {
+
+template <typename Descriptor>
+void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
+ ElementsKind elements_kind,
+ compiler::Node* array_map,
+ compiler::Node* allocation_site,
+ AllocationSiteMode mode) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+
+ Label ok(assembler);
+ Label smi_size(assembler);
+ Label small_smi_size(assembler);
+ Label call_runtime(assembler, Label::kDeferred);
+
+ Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
+ assembler->Branch(assembler->WordIsSmi(size), &smi_size, &call_runtime);
+
+ assembler->Bind(&smi_size);
+
+ if (IsFastPackedElementsKind(elements_kind)) {
+ Label abort(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->SmiEqual(size, assembler->SmiConstant(Smi::FromInt(0))),
+ &small_smi_size, &abort);
+
+ assembler->Bind(&abort);
+ Node* reason =
+ assembler->SmiConstant(Smi::FromInt(kAllocatingNonEmptyPackedArray));
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ assembler->TailCallRuntime(Runtime::kAbort, context, reason);
+ } else {
+ int element_size =
+ IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+ int max_fast_elements =
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
+ JSArray::kSize - AllocationMemento::kSize) /
+ element_size;
+ assembler->Branch(
+ assembler->SmiAboveOrEqual(
+ size, assembler->SmiConstant(Smi::FromInt(max_fast_elements))),
+ &call_runtime, &small_smi_size);
+ }
+
+ assembler->Bind(&small_smi_size);
+ {
+ Node* array = assembler->AllocateJSArray(
+ elements_kind, array_map, size, size,
+ mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->Return(array);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* array_size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
+ Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
+ assembler->TailCallRuntime(Runtime::kNewArray, context, function,
+ array_size, function, allocation_site);
+ }
+}
+} // namespace
+
+void ArraySingleArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* native_context =
+ assembler->LoadObjectField(function, JSFunction::kContextOffset);
+ Node* array_map =
+ assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
+ ? DONT_TRACK_ALLOCATION_SITE
+ : AllocationSite::GetMode(elements_kind());
+ Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
+ SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
+ array_map, allocation_site, mode);
+}
+
+void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* array_map = assembler->LoadObjectField(
+ function, JSFunction::kPrototypeOrInitialMapOffset);
+ SingleArgumentConstructorCommon<Descriptor>(
+ assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
+ DONT_TRACK_ALLOCATION_SITE);
+}
+
+void GrowArrayElementsStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ CodeStubAssembler::Label runtime(assembler,
+ CodeStubAssembler::Label::kDeferred);
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ ElementsKind kind = elements_kind();
+
+ Node* elements = assembler->LoadElements(object);
+ Node* new_elements = assembler->CheckAndGrowElementsCapacity(
+ context, elements, kind, key, &runtime);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+ assembler->Return(new_elements);
+
+ assembler->Bind(&runtime);
+ // TODO(danno): Make this a tail call when the stub is only used from TurboFan
+  // code. This mustn't be a tail call for now, since the caller site in lithium
+  // creates a safepoint. This safepoint mustn't have a different number of
+ // arguments on the stack in the case that a GC happens from the slow-case
+ // allocation path (zero, since all the stubs inputs are in registers) and
+ // when the call happens (it would be two in the tail call case due to the
+ // tail call pushing the arguments on the stack for the runtime call). By not
+ // tail-calling, the runtime call case also has zero arguments on the stack
+ // for the stub frame.
+ assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
+ object, key));
+}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {
minor_key_ = ArgumentCountBits::encode(ANY);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
-
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
int argument_count)
: PlatformCodeStub(isolate) {
@@ -3492,15 +5832,10 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
} else {
UNREACHABLE();
}
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-}
-
-
-InternalArrayConstructorStub::InternalArrayConstructorStub(
- Isolate* isolate) : PlatformCodeStub(isolate) {
- InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
+InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
Representation RepresentationFromType(Type* type) {
if (type->Is(Type::UntaggedIntegral())) {