Diffstat (limited to 'deps/v8/src/code-stubs.cc')
-rw-r--r--  deps/v8/src/code-stubs.cc | 1828
1 file changed, 394 insertions, 1434 deletions
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2ee5ece8da..032fdb30b3 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -12,13 +12,17 @@
#include "src/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
+#include "src/ic/accessor-assembler.h"
#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-stats.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
+using compiler::CodeAssemblerState;
RUNTIME_FUNCTION(UnexpectedStubMiss) {
FATAL("Unexpected deopt of a stub");
@@ -110,6 +114,12 @@ Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
return ic;
}
+void CodeStub::DeleteStubFromCacheForTesting() {
+ Heap* heap = isolate_->heap();
+ Handle<UnseededNumberDictionary> dict(heap->code_stubs());
+ dict = UnseededNumberDictionary::DeleteKey(dict, GetKey());
+ heap->SetRootCodeStubs(*dict);
+}
Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
@@ -183,8 +193,7 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
- DCHECK(!NeedsImmovableCode() ||
- heap->lo_space()->Contains(code) ||
+ DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate());
}
@@ -322,36 +331,38 @@ void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
os << "StringAddStub_" << flags() << "_" << pretenure_flag();
}
-void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StringAddStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* left = assembler->Parameter(Descriptor::kLeft);
- Node* right = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ CodeStubAssembler assembler(state);
+ Node* left = assembler.Parameter(Descriptor::kLeft);
+ Node* right = assembler.Parameter(Descriptor::kRight);
+ Node* context = assembler.Parameter(Descriptor::kContext);
if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
DCHECK((flags() & STRING_ADD_CONVERT) != 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- left = assembler->ToString(context,
- assembler->JSReceiverToPrimitive(context, left));
+ left = assembler.ToString(context,
+ assembler.JSReceiverToPrimitive(context, left));
}
if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
DCHECK((flags() & STRING_ADD_CONVERT) != 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- right = assembler->ToString(
- context, assembler->JSReceiverToPrimitive(context, right));
+ right = assembler.ToString(context,
+ assembler.JSReceiverToPrimitive(context, right));
}
if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
CodeStubAssembler::AllocationFlag flags =
(pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
: CodeStubAssembler::kNone;
- assembler->Return(assembler->StringAdd(context, left, right, flags));
+ assembler.Return(assembler.StringAdd(context, left, right, flags));
} else {
Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
pretenure_flag());
- assembler->TailCallStub(callable, context, left, right);
+ assembler.TailCallStub(callable, context, left, right);
}
}
@@ -422,299 +433,108 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
- name);
- GenerateAssembly(&assembler);
- return assembler.GenerateCode();
-}
-
-void LoadICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadIC(&p);
-}
-
-void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadIC(&p);
-}
-
-void LoadICProtoArrayStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* handler = assembler->Parameter(Descriptor::kHandler);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadICProtoArray(&p, handler);
-}
-
-void LoadGlobalICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
- vector);
- assembler->LoadGlobalIC(&p);
-}
-
-void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
- vector);
- assembler->LoadGlobalIC(&p);
-}
-
-void KeyedLoadICTrampolineTFStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadIC(&p);
-}
-
-void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadIC(&p);
-}
-
-void StoreICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->StoreIC(&p);
+ compiler::CodeAssemblerState state(isolate(), &zone, descriptor,
+ GetCodeFlags(), name);
+ GenerateAssembly(&state);
+ return compiler::CodeAssembler::GenerateCode(&state);
}
-void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->StoreIC(&p);
-}
-
-void KeyedStoreICTrampolineTFStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* value = assembler->Parameter(Descriptor::kValue);
-
- assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
- assembler->Return(value);
-}
-
-void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* offset =
- assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label miss(assembler);
-
- Representation representation = this->representation();
- assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
- is_inobject(), representation.Mnemonic());
-
- Node* prepared_value =
- assembler->PrepareValueForWrite(value, representation, &miss);
-
- if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
- assembler->Comment("Extend storage");
- assembler->ExtendPropertiesBackingStore(receiver);
- } else {
- DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
- }
-
- // Store the new value into the "extended" object.
- assembler->Comment("Store value");
- assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
- prepared_value, true);
-
- // And finally update the map.
- assembler->Comment("Store map");
- assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
- assembler->Return(value);
-
- // Only store to tagged field never bails out.
- if (!representation.IsTagged()) {
- assembler->Bind(&miss);
- {
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
- }
- }
+void LoadICProtoArrayStub::GenerateAssembly(CodeAssemblerState* state) const {
+ AccessorAssembler::GenerateLoadICProtoArray(
+ state, throw_reference_error_if_nonexistent());
}
void ElementsTransitionAndStoreStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* map = assembler.Parameter(Descriptor::kMap);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Comment(
+ assembler.Comment(
"ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
" is_jsarray=%d, store_mode=%d",
ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
is_jsarray(), store_mode());
- Label miss(assembler);
+ Label miss(&assembler);
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- assembler->Goto(&miss);
+ assembler.Goto(&miss);
} else {
- assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
- is_jsarray(), &miss);
- assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
- store_mode(), &miss);
- assembler->Return(value);
+ assembler.TransitionElementsKind(receiver, map, from_kind(), to_kind(),
+ is_jsarray(), &miss);
+ assembler.EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
+ store_mode(), &miss);
+ assembler.Return(value);
}
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
- context, receiver, key, value, map, slot,
- vector);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
+ context, receiver, key, value, map, slot, vector);
}
}
void AllocateHeapNumberStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* result = assembler->AllocateHeapNumber();
- assembler->Return(result);
+ Node* result = assembler.AllocateHeapNumber();
+ assembler.Return(result);
}
-#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
- const { \
- compiler::Node* result = \
- assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
- compiler::Node* map = assembler->LoadMap(result); \
- assembler->StoreNoWriteBarrier( \
- MachineRepresentation::kTagged, map, \
- assembler->HeapConstant(isolate()->factory()->type##_map())); \
- assembler->Return(result); \
+#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Stub::GenerateAssembly( \
+ compiler::CodeAssemblerState* state) const { \
+ CodeStubAssembler assembler(state); \
+ compiler::Node* result = \
+ assembler.Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
+ compiler::Node* map = assembler.LoadMap(result); \
+ assembler.StoreNoWriteBarrier( \
+ MachineRepresentation::kTagged, map, \
+ assembler.HeapConstant(isolate()->factory()->type##_map())); \
+ assembler.Return(result); \
}
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
-void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- compiler::Node* value = assembler->Parameter(0);
- compiler::Node* string = assembler->LoadJSValueValue(value);
- compiler::Node* result = assembler->LoadStringLength(string);
- assembler->Return(result);
-}
+void StringLengthStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ compiler::Node* value = assembler.Parameter(0);
+ compiler::Node* string = assembler.LoadJSValueValue(value);
+ compiler::Node* result = assembler.LoadStringLength(string);
+ assembler.Return(result);
+}
+
+#define BINARY_OP_STUB(Name) \
+ void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const { \
+ typedef BinaryOpWithVectorDescriptor Descriptor; \
+ CodeStubAssembler assembler(state); \
+ assembler.Return(Generate( \
+ &assembler, assembler.Parameter(Descriptor::kLeft), \
+ assembler.Parameter(Descriptor::kRight), \
+ assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)), \
+ assembler.Parameter(Descriptor::kVector), \
+ assembler.Parameter(Descriptor::kContext))); \
+ }
+BINARY_OP_STUB(AddWithFeedbackStub)
+BINARY_OP_STUB(SubtractWithFeedbackStub)
+BINARY_OP_STUB(MultiplyWithFeedbackStub)
+BINARY_OP_STUB(DivideWithFeedbackStub)
+BINARY_OP_STUB(ModulusWithFeedbackStub)
+#undef BINARY_OP_STUB
// static
compiler::Node* AddWithFeedbackStub::Generate(
@@ -732,7 +552,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
call_add_stub(assembler), end(assembler);
Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
- var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
var_result(assembler, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
@@ -768,7 +588,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&if_notoverflow);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
var_result.Bind(assembler->BitcastWordToTaggedSigned(
assembler->Projection(0, pair)));
assembler->Goto(&end);
@@ -829,7 +649,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&do_fadd);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value =
assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -876,7 +696,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
&call_with_any_feedback);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kString));
+ assembler->SmiConstant(BinaryOperationFeedback::kString));
Callable callable = CodeFactory::StringAdd(
assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
@@ -899,14 +719,14 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_add_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_add_stub);
}
@@ -938,7 +758,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
call_with_any_feedback(assembler);
Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
- var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
var_result(assembler, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
@@ -976,7 +796,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&if_notoverflow);
// lhs, rhs, result smi. combined - smi.
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
var_result.Bind(
assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
assembler->Goto(&end);
@@ -1039,7 +859,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&do_fsub);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
@@ -1063,7 +883,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&if_rhsissmi);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
@@ -1077,7 +897,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
&check_rhsisoddball);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
}
@@ -1092,14 +912,14 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_subtract_stub);
}
@@ -1134,9 +954,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
var_rhs_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
@@ -1152,11 +970,10 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
// in case of overflow.
var_result.Bind(assembler->SmiMul(lhs, rhs));
- var_type_feedback.Bind(assembler->Select(
+ var_type_feedback.Bind(assembler->SelectSmiConstant(
assembler->TaggedIsSmi(var_result.value()),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber),
- MachineRepresentation::kWord32));
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
assembler->Goto(&end);
}
@@ -1165,7 +982,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
&check_rhsisoddball);
// Convert {lhs} to a double and multiply it with the value of {rhs}.
@@ -1180,7 +997,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
&if_lhsisnotnumber);
// Check if {rhs} is a Smi.
@@ -1201,7 +1018,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
&check_rhsisoddball);
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
@@ -1215,7 +1032,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
assembler->Bind(&do_fmul);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value =
assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -1256,14 +1073,14 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_multiply_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_multiply_stub);
}
@@ -1298,9 +1115,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1318,27 +1133,26 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Do floating point division if {divisor} is zero.
assembler->GotoIf(
- assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
- &bailout);
+ assembler->WordEqual(divisor, assembler->SmiConstant(0)), &bailout);
// Do floating point division {dividend} is zero and {divisor} is
// negative.
Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
assembler->Branch(
- assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ assembler->WordEqual(dividend, assembler->SmiConstant(0)),
&dividend_is_zero, &dividend_is_not_zero);
assembler->Bind(&dividend_is_zero);
{
assembler->GotoIf(
- assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ assembler->SmiLessThan(divisor, assembler->SmiConstant(0)),
&bailout);
assembler->Goto(&dividend_is_not_zero);
}
assembler->Bind(&dividend_is_not_zero);
- Node* untagged_divisor = assembler->SmiUntag(divisor);
- Node* untagged_dividend = assembler->SmiUntag(dividend);
+ Node* untagged_divisor = assembler->SmiToWord32(divisor);
+ Node* untagged_dividend = assembler->SmiToWord32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -1367,8 +1181,8 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
&bailout);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->SmiFromWord32(untagged_result));
assembler->Goto(&end);
// Bailout: convert {dividend} and {divisor} to double and do double
@@ -1386,7 +1200,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
@@ -1401,7 +1215,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
&dividend_is_not_number);
// Check if {divisor} is a Smi.
@@ -1423,7 +1237,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
@@ -1438,7 +1252,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->Bind(&do_fdiv);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Div(var_dividend_float64.value(),
var_divisor_float64.value());
var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1479,14 +1293,14 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_divide_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_divide_stub);
}
@@ -1520,9 +1334,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1537,10 +1349,10 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&divisor_is_smi);
{
var_result.Bind(assembler->SmiMod(dividend, divisor));
- var_type_feedback.Bind(assembler->Select(
+ var_type_feedback.Bind(assembler->SelectSmiConstant(
assembler->TaggedIsSmi(var_result.value()),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
assembler->Goto(&end);
}
@@ -1549,7 +1361,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
@@ -1565,7 +1377,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
&dividend_is_not_number);
// Check if {divisor} is a Smi.
@@ -1587,7 +1399,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
@@ -1601,7 +1413,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&do_fmod);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Mod(var_dividend_float64.value(),
var_divisor_float64.value());
var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1642,14 +1454,14 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_modulus_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_modulus_stub);
}
@@ -1666,277 +1478,13 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
return var_result.value();
}
-// static
-compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::Node* context,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point increment.
- Label do_finc(assembler), end(assembler);
- Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(assembler, MachineRepresentation::kTagged);
- Variable result_var(assembler, MachineRepresentation::kTagged);
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(assembler, 2, loop_vars);
- value_var.Bind(value);
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNone));
- assembler->Goto(&start);
- assembler->Bind(&start);
- {
- value = value_var.value();
-
- Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- assembler->Bind(&if_issmi);
- {
- // Try fast Smi addition first.
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->IntPtrAddWithOverflow(
- assembler->BitcastTaggedToWord(value),
- assembler->BitcastTaggedToWord(one));
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi addition overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(
- assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
- assembler->Goto(&end);
-
- assembler->Bind(&if_overflow);
- {
- var_finc_value.Bind(assembler->SmiToFloat64(value));
- assembler->Goto(&do_finc);
- }
- }
-
- assembler->Bind(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(assembler),
- if_valuenotnumber(assembler, Label::kDeferred);
- Node* value_map = assembler->LoadMap(value);
- assembler->Branch(assembler->IsHeapNumberMap(value_map),
- &if_valueisnumber, &if_valuenotnumber);
-
- assembler->Bind(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
- assembler->Goto(&do_finc);
- }
-
- assembler->Bind(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(assembler,
- assembler->Word32Equal(var_type_feedback.value(),
- assembler->Int32Constant(
- BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
- Node* instance_type = assembler->LoadMapInstanceType(value_map);
- Node* is_oddball = assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- assembler->Bind(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(
- assembler->LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(assembler->Int32Constant(
- BinaryOperationFeedback::kNumberOrOddball));
- assembler->Goto(&start);
- }
-
- assembler->Bind(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
- }
- }
- }
- }
-
- assembler->Bind(&do_finc);
- {
- Node* finc_value = var_finc_value.value();
- Node* one = assembler->Float64Constant(1.0);
- Node* finc_result = assembler->Float64Add(finc_value, one);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return result_var.value();
-}
-
-void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void NumberToStringStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* argument = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Return(assembler->NumberToString(context, argument));
-}
-
-// static
-compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::Node* context,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point decrement.
- Label do_fdec(assembler), end(assembler);
- Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(assembler, MachineRepresentation::kTagged);
- Variable result_var(assembler, MachineRepresentation::kTagged);
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(assembler, 2, loop_vars);
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNone));
- value_var.Bind(value);
- assembler->Goto(&start);
- assembler->Bind(&start);
- {
- value = value_var.value();
-
- Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- assembler->Bind(&if_issmi);
- {
- // Try fast Smi subtraction first.
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->BitcastTaggedToWord(value),
- assembler->BitcastTaggedToWord(one));
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(
- assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
- assembler->Goto(&end);
-
- assembler->Bind(&if_overflow);
- {
- var_fdec_value.Bind(assembler->SmiToFloat64(value));
- assembler->Goto(&do_fdec);
- }
- }
-
- assembler->Bind(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(assembler),
- if_valuenotnumber(assembler, Label::kDeferred);
- Node* value_map = assembler->LoadMap(value);
- assembler->Branch(assembler->IsHeapNumberMap(value_map),
- &if_valueisnumber, &if_valuenotnumber);
-
- assembler->Bind(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
- assembler->Goto(&do_fdec);
- }
-
- assembler->Bind(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(assembler,
- assembler->Word32Equal(var_type_feedback.value(),
- assembler->Int32Constant(
- BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
- Node* instance_type = assembler->LoadMapInstanceType(value_map);
- Node* is_oddball = assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- assembler->Bind(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(
- assembler->LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(assembler->Int32Constant(
- BinaryOperationFeedback::kNumberOrOddball));
- assembler->Goto(&start);
- }
-
- assembler->Bind(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
- }
- }
- }
- }
-
- assembler->Bind(&do_fdec);
- {
- Node* fdec_value = var_fdec_value.value();
- Node* one = assembler->Float64Constant(1.0);
- Node* fdec_result = assembler->Float64Sub(fdec_value, one);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return result_var.value();
+ CodeStubAssembler assembler(state);
+ Node* argument = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ assembler.Return(assembler.NumberToString(context, argument));
}
// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
@@ -1948,110 +1496,85 @@ compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
return assembler->SubString(context, string, from, to);
}
-void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void SubStringStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(Generate(&assembler,
+ assembler.Parameter(Descriptor::kString),
+ assembler.Parameter(Descriptor::kFrom),
+ assembler.Parameter(Descriptor::kTo),
+ assembler.Parameter(Descriptor::kContext)));
+}
+
+void LoadApiGetterStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ CodeStubAssembler assembler(state);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
// For now we only support receiver_is_holder.
DCHECK(receiver_is_holder());
Node* holder = receiver;
- Node* map = assembler->LoadMap(receiver);
- Node* descriptors = assembler->LoadMapDescriptors(map);
- Node* value_index =
- assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
- Node* callback = assembler->LoadFixedArrayElement(
- descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
- holder, callback);
+ Node* map = assembler.LoadMap(receiver);
+ Node* descriptors = assembler.LoadMapDescriptors(map);
+ Node* callback = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(index()));
+ assembler.TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+ holder, callback);
}
-void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreGlobalStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- FieldIndex index = this->index();
- Representation representation = this->representation();
-
- assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
- index.is_inobject(), index.offset(),
- representation.Mnemonic());
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label miss(assembler);
-
- Node* prepared_value =
- assembler->PrepareValueForWrite(value, representation, &miss);
- assembler->StoreNamedField(receiver, index, representation, prepared_value,
- false);
- assembler->Return(value);
-
- // Only stores to tagged field can't bailout.
- if (!representation.IsTagged()) {
- assembler->Bind(&miss);
- {
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
- }
- }
-}
-
-void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- assembler->Comment(
+ assembler.Comment(
"StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
cell_type(), PropertyCellType::kConstantType == cell_type()
? static_cast<int>(constant_type())
: -1,
check_global());
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
if (check_global()) {
// Check that the map of the global has not changed: use a placeholder map
// that will be replaced later with the global object's map.
- Node* proxy_map = assembler->LoadMap(receiver);
- Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
- Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ Node* proxy_map = assembler.LoadMap(receiver);
+ Node* global = assembler.LoadObjectField(proxy_map, Map::kPrototypeOffset);
+ Node* map_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::global_map_placeholder(isolate())));
- Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
- Node* map = assembler->LoadMap(global);
- assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+ Node* expected_map = assembler.LoadWeakCellValueUnchecked(map_cell);
+ Node* map = assembler.LoadMap(global);
+ assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
}
- Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ Node* weak_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::property_cell_placeholder(isolate())));
- Node* cell = assembler->LoadWeakCellValue(weak_cell);
- assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);
+ Node* cell = assembler.LoadWeakCellValue(weak_cell);
+ assembler.GotoIf(assembler.TaggedIsSmi(cell), &miss);
// Load the payload of the global parameter cell. A hole indicates that the
// cell has been invalidated and that the store must be handled by the
// runtime.
Node* cell_contents =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
PropertyCellType cell_type = this->cell_type();
if (cell_type == PropertyCellType::kConstant ||
cell_type == PropertyCellType::kUndefined) {
// This is always valid for all states a cell can be in.
- assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
+ assembler.GotoIf(assembler.WordNotEqual(cell_contents, value), &miss);
} else {
- assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);
+ assembler.GotoIf(assembler.IsTheHole(cell_contents), &miss);
// When dealing with constant types, the type may be allowed to change, as
// long as optimized code remains valid.
@@ -2059,7 +1582,7 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
if (cell_type == PropertyCellType::kConstantType) {
switch (constant_type()) {
case PropertyCellConstantType::kSmi:
- assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
+ assembler.GotoUnless(assembler.TaggedIsSmi(value), &miss);
value_is_smi = true;
break;
case PropertyCellConstantType::kStableMap: {
@@ -2068,273 +1591,156 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
// are the maps that were originally in the cell or not. If optimized
// code will deopt when a cell has a unstable map and if it has a
// dependency on a stable map, it will deopt if the map destabilizes.
- assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
- assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
- Node* expected_map = assembler->LoadMap(cell_contents);
- Node* map = assembler->LoadMap(value);
- assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+ assembler.GotoIf(assembler.TaggedIsSmi(value), &miss);
+ assembler.GotoIf(assembler.TaggedIsSmi(cell_contents), &miss);
+ Node* expected_map = assembler.LoadMap(cell_contents);
+ Node* map = assembler.LoadMap(value);
+ assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
break;
}
}
}
if (value_is_smi) {
- assembler->StoreObjectFieldNoWriteBarrier(
- cell, PropertyCell::kValueOffset, value);
+ assembler.StoreObjectFieldNoWriteBarrier(cell, PropertyCell::kValueOffset,
+ value);
} else {
- assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
+ assembler.StoreObjectField(cell, PropertyCell::kValueOffset, value);
}
}
- assembler->Return(value);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
}
+void LoadFieldStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ AccessorAssembler::GenerateLoadField(state);
+}
+
void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
- assembler->Return(result);
+ Node* result = assembler.LoadKeyedSloppyArguments(receiver, key, &miss);
+ assembler.Return(result);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
- key, slot, vector);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
+ key, slot, vector);
}
}
void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
- assembler->Return(value);
+ assembler.StoreKeyedSloppyArguments(receiver, key, value, &miss);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
- slot, vector, receiver, key);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+ vector, receiver, key);
}
}
void LoadScriptContextFieldStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
- context_index(), slot_index());
+ assembler.Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* script_context = assembler->LoadScriptContext(context, context_index());
- Node* result = assembler->LoadFixedArrayElement(
- script_context, assembler->IntPtrConstant(slot_index()), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Return(result);
+ Node* script_context = assembler.LoadScriptContext(context, context_index());
+ Node* result = assembler.LoadFixedArrayElement(script_context, slot_index());
+ assembler.Return(result);
}
void StoreScriptContextFieldStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
- context_index(), slot_index());
+ assembler.Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* script_context = assembler->LoadScriptContext(context, context_index());
- assembler->StoreFixedArrayElement(
- script_context, assembler->IntPtrConstant(slot_index()), value,
- UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Return(value);
+ Node* script_context = assembler.LoadScriptContext(context, context_index());
+ assembler.StoreFixedArrayElement(
+ script_context, assembler.IntPtrConstant(slot_index()), value);
+ assembler.Return(value);
}
void StoreInterceptorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
- receiver, name, value);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ assembler.TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
+ receiver, name, value);
}
void LoadIndexedInterceptorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
- assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
- &if_keyisinvalid);
- assembler->Bind(&if_keyispositivesmi);
- assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
- receiver, key);
-
- assembler->Bind(&if_keyisinvalid);
- assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
- slot, vector);
-}
-
-// static
-bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
- // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
- // support copy-on-write (COW) elements for now.
- // TODO(mvstanton): make object literals support COW elements.
- return expr->fast_elements() && expr->has_shallow_properties() &&
- expr->properties_count() <= kMaximumClonedProperties;
-}
-
-// static
-int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
- // This heuristic of setting empty literals to have
- // kInitialGlobalObjectUnusedPropertiesCount must remain in sync with the
- // runtime.
- // TODO(verwaest): Unify this with the heuristic in the runtime.
- return literal_length == 0
- ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
- : literal_length;
-}
-
-// static
-compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
- CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
- compiler::Node* closure, compiler::Node* literals_index,
- compiler::Node* properties_count) {
- typedef compiler::Node Node;
- typedef compiler::CodeAssembler::Label Label;
- typedef compiler::CodeAssembler::Variable Variable;
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literals_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
-
- // Calculate the object and allocation size based on the properties count.
- Node* object_size = assembler->IntPtrAdd(
- assembler->WordShl(properties_count, kPointerSizeLog2),
- assembler->IntPtrConstant(JSObject::kHeaderSize));
- Node* allocation_size = object_size;
- if (FLAG_allocation_site_pretenuring) {
- allocation_size = assembler->IntPtrAdd(
- object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
- }
- Node* boilerplate = assembler->LoadObjectField(
- allocation_site, AllocationSite::kTransitionInfoOffset);
- Node* boilerplate_map = assembler->LoadMap(boilerplate);
- Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
- Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
- assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
- call_runtime);
-
- Node* copy = assembler->Allocate(allocation_size);
-
- // Copy boilerplate elements.
- Variable offset(assembler, MachineType::PointerRepresentation());
- offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
- Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
- Label loop_body(assembler, &offset), loop_check(assembler, &offset);
- // We should always have an object size greater than zero.
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- {
- // The Allocate above guarantees that the copy lies in new space. This
- // allows us to skip write barriers. This is necessary since we may also be
- // copying unboxed doubles.
- Node* field =
- assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
- assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
- offset.value(), field);
- assembler->Goto(&loop_check);
- }
- assembler->Bind(&loop_check);
- {
- offset.Bind(assembler->IntPtrAdd(offset.value(),
- assembler->IntPtrConstant(kPointerSize)));
- assembler->GotoUnless(
- assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
- &loop_body);
- }
-
- if (FLAG_allocation_site_pretenuring) {
- Node* memento = assembler->InnerAllocate(copy, object_size);
- assembler->StoreObjectFieldNoWriteBarrier(
- memento, HeapObject::kMapOffset,
- assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
- assembler->StoreObjectFieldNoWriteBarrier(
- memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
- Node* memento_create_count = assembler->LoadObjectField(
- allocation_site, AllocationSite::kPretenureCreateCountOffset);
- memento_create_count = assembler->SmiAdd(
- memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
- assembler->StoreObjectFieldNoWriteBarrier(
- allocation_site, AllocationSite::kPretenureCreateCountOffset,
- memento_create_count);
- }
-
- // TODO(verwaest): Allocate and fill in double boxes.
- return copy;
-}
-
-void FastCloneShallowObjectStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- Label call_runtime(assembler);
- Node* closure = assembler->Parameter(0);
- Node* literals_index = assembler->Parameter(1);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* properties_count =
- assembler->IntPtrConstant(PropertiesCount(this->length()));
- Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
- literals_index, properties_count);
- assembler->Return(copy);
+ Label if_keyispositivesmi(&assembler), if_keyisinvalid(&assembler);
+ assembler.Branch(assembler.TaggedIsPositiveSmi(key), &if_keyispositivesmi,
+ &if_keyisinvalid);
+ assembler.Bind(&if_keyispositivesmi);
+ assembler.TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
+ receiver, key);
- assembler->Bind(&call_runtime);
- Node* constant_properties = assembler->Parameter(2);
- Node* flags = assembler->Parameter(3);
- Node* context = assembler->Parameter(4);
- assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literals_index, constant_properties, flags);
+ assembler.Bind(&if_keyisinvalid);
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
+ slot, vector);
}
template<class StateType>
@@ -2342,7 +1748,19 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
DCHECK(from != to);
- if (!FLAG_trace_ic) return;
+ if (V8_LIKELY(!FLAG_ic_stats)) return;
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto ic_stats = ICStats::instance();
+ ic_stats->Begin();
+ ICInfo& ic_info = ic_stats->Current();
+ ic_info.type = MajorName(MajorKey());
+ ic_info.state = ToString(from);
+ ic_info.state += "=>";
+ ic_info.state += ToString(to);
+ ic_stats->End();
+ return;
+ }
OFStream os(stdout);
os << "[";
PrintBaseName(os);
@@ -2362,12 +1780,6 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void LoadDictionaryElementStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
-}
-
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
if (kind() == Code::KEYED_LOAD_IC) {
@@ -2427,528 +1839,67 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
-void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void GetPropertyStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
- end(assembler);
+ Label call_runtime(&assembler, Label::kDeferred),
+ return_undefined(&assembler), end(&assembler);
- Node* object = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Variable var_result(assembler, MachineRepresentation::kTagged);
+ Node* object = assembler.Parameter(0);
+ Node* key = assembler.Parameter(1);
+ Node* context = assembler.Parameter(2);
+ Variable var_result(&assembler, MachineRepresentation::kTagged);
CodeStubAssembler::LookupInHolder lookup_property_in_holder =
- [assembler, context, &var_result, &end](
+ [&assembler, context, &var_result, &end](
Node* receiver, Node* holder, Node* holder_map,
Node* holder_instance_type, Node* unique_name, Label* next_holder,
Label* if_bailout) {
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Label if_found(assembler);
- assembler->TryGetOwnProperty(
+ Variable var_value(&assembler, MachineRepresentation::kTagged);
+ Label if_found(&assembler);
+ assembler.TryGetOwnProperty(
context, receiver, holder, holder_map, holder_instance_type,
unique_name, &if_found, &var_value, next_holder, if_bailout);
- assembler->Bind(&if_found);
+ assembler.Bind(&if_found);
{
var_result.Bind(var_value.value());
- assembler->Goto(&end);
+ assembler.Goto(&end);
}
};
CodeStubAssembler::LookupInHolder lookup_element_in_holder =
- [assembler, context, &var_result, &end](
+ [&assembler, context, &var_result, &end](
Node* receiver, Node* holder, Node* holder_map,
Node* holder_instance_type, Node* index, Label* next_holder,
Label* if_bailout) {
// Not supported yet.
- assembler->Use(next_holder);
- assembler->Goto(if_bailout);
+ assembler.Use(next_holder);
+ assembler.Goto(if_bailout);
};
- assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
- lookup_element_in_holder,
- &return_undefined, &call_runtime);
+ assembler.TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_undefined,
+ &call_runtime);
- assembler->Bind(&return_undefined);
+ assembler.Bind(&return_undefined);
{
- var_result.Bind(assembler->UndefinedConstant());
- assembler->Goto(&end);
+ var_result.Bind(assembler.UndefinedConstant());
+ assembler.Goto(&end);
}
- assembler->Bind(&call_runtime);
+ assembler.Bind(&call_runtime);
{
var_result.Bind(
- assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->Return(var_result.value());
-}
-
-// static
-compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* shared_info,
- compiler::Node* context) {
- typedef compiler::Node Node;
- typedef compiler::CodeAssembler::Label Label;
- typedef compiler::CodeAssembler::Variable Variable;
-
- Isolate* isolate = assembler->isolate();
- Factory* factory = assembler->isolate()->factory();
- assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
- // Create a new closure from the given function info in new space
- Node* result = assembler->Allocate(JSFunction::kSize);
-
- // Calculate the index of the map we should install on the function based on
- // the FunctionKind and LanguageMode of the function.
- // Note: Must be kept in sync with Context::FunctionMapIndex
- Node* compiler_hints = assembler->LoadObjectField(
- shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
- Node* is_strict = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
-
- Label if_normal(assembler), if_generator(assembler), if_async(assembler),
- if_class_constructor(assembler), if_function_without_prototype(assembler),
- load_map(assembler);
- Variable map_index(assembler, MachineType::PointerRepresentation());
-
- STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
- Node* is_not_normal = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
- assembler->GotoUnless(is_not_normal, &if_normal);
-
- Node* is_generator = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kGeneratorFunction
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_generator, &if_generator);
-
- Node* is_async = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_async, &if_async);
-
- Node* is_class_constructor = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_class_constructor, &if_class_constructor);
-
- if (FLAG_debug_code) {
- // Function must be a function without a prototype.
- CSA_ASSERT(assembler, assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(
- (FunctionKind::kAccessorFunction |
- FunctionKind::kArrowFunction |
- FunctionKind::kConciseMethod)
- << SharedFunctionInfo::kFunctionKindShift)));
- }
- assembler->Goto(&if_function_without_prototype);
-
- assembler->Bind(&if_normal);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_generator);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(
- Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_async);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_class_constructor);
- {
- map_index.Bind(
- assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_function_without_prototype);
- {
- map_index.Bind(assembler->IntPtrConstant(
- Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&load_map);
-
- // Get the function map in the current native context and set that
- // as the map of the allocated object.
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map_slot_value =
- assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->StoreMapNoWriteBarrier(result, map_slot_value);
-
- // Initialize the rest of the function.
- Node* empty_fixed_array =
- assembler->HeapConstant(factory->empty_fixed_array());
- Node* empty_literals_array =
- assembler->HeapConstant(factory->empty_literals_array());
- assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
- empty_fixed_array);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
- empty_fixed_array);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
- empty_literals_array);
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset,
- assembler->TheHoleConstant());
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kSharedFunctionInfoOffset, shared_info);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
- context);
- Handle<Code> lazy_builtin_handle(
- assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
- Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
- Node* lazy_builtin_entry = assembler->IntPtrAdd(
- lazy_builtin,
- assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
- assembler->StoreObjectFieldNoWriteBarrier(result,
- JSFunction::kNextFunctionLinkOffset,
- assembler->UndefinedConstant());
-
- return result;
-}
-
-void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- assembler->Return(
- Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
-}
-
-// static
-compiler::Node* FastNewFunctionContextStub::Generate(
- CodeStubAssembler* assembler, compiler::Node* function,
- compiler::Node* slots, compiler::Node* context) {
- typedef compiler::Node Node;
-
- Node* min_context_slots =
- assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
- Node* length = assembler->Int32Add(slots, min_context_slots);
- Node* size = assembler->Int32Add(
- assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
- assembler->Int32Constant(FixedArray::kHeaderSize));
-
- // Create a new function context in new space
- Node* function_context = assembler->Allocate(size);
-
- Isolate* isolate = assembler->isolate();
- assembler->StoreMapNoWriteBarrier(
- function_context,
- assembler->HeapConstant(isolate->factory()->function_context_map()));
- assembler->StoreObjectFieldNoWriteBarrier(function_context,
- Context::kLengthOffset,
- assembler->SmiFromWord32(length));
-
- // Set up the fixed slots.
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
- function, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
- context, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
- assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);
-
- // Copy the native context from the previous context.
- Node* native_context = assembler->LoadNativeContext(context);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
- native_context, SKIP_WRITE_BARRIER);
-
- // Initialize the rest of the slots to undefined.
- Node* undefined = assembler->UndefinedConstant();
- assembler->BuildFastFixedArrayForEach(
- function_context, FAST_ELEMENTS, min_context_slots, length,
- [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
- assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
- context, offset, undefined);
- });
-
- return function_context;
-}
-
-void FastNewFunctionContextStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
- Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- assembler->Return(Generate(assembler, function, slots, context));
-}
-
-// static
-compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* closure,
- compiler::Node* literal_index,
- compiler::Node* pattern,
- compiler::Node* flags,
- compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- typedef compiler::Node Node;
-
- Label call_runtime(assembler, Label::kDeferred), end(assembler);
-
- Variable result(assembler, MachineRepresentation::kTagged);
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* boilerplate = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);
-
- {
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Node* copy = assembler->Allocate(size);
- for (int offset = 0; offset < size; offset += kPointerSize) {
- Node* value = assembler->LoadObjectField(boilerplate, offset);
- assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
- }
- result.Bind(copy);
- assembler->Goto(&end);
- }
-
- assembler->Bind(&call_runtime);
- {
- result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
- closure, literal_index, pattern, flags));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- Node* closure = assembler->Parameter(Descriptor::kClosure);
- Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
- Node* pattern = assembler->Parameter(Descriptor::kPattern);
- Node* flags = assembler->Parameter(Descriptor::kFlags);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- assembler->Return(
- Generate(assembler, closure, literal_index, pattern, flags, context));
-}
-
-namespace {
-
-compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
- compiler::Node* boilerplate,
- compiler::Node* boilerplate_map,
- compiler::Node* boilerplate_elements,
- compiler::Node* allocation_site,
- compiler::Node* capacity,
- ElementsKind kind) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::ParameterMode ParameterMode;
-
- ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
-
- Node* length = assembler->LoadJSArrayLength(boilerplate);
-
- if (assembler->Is64()) {
- capacity = assembler->SmiUntag(capacity);
- param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
- }
-
- Node *array, *elements;
- std::tie(array, elements) =
- assembler->AllocateUninitializedJSArrayWithElements(
- kind, boilerplate_map, length, allocation_site, capacity, param_mode);
-
- assembler->Comment("copy elements header");
- for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
- offset += kPointerSize) {
- Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
- assembler->StoreObjectField(elements, offset, value);
- }
-
- if (assembler->Is64()) {
- length = assembler->SmiUntag(length);
- }
-
- assembler->Comment("copy boilerplate elements");
- assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
- length, SKIP_WRITE_BARRIER, param_mode);
- assembler->IncrementCounter(
- assembler->isolate()->counters()->inlined_copied_elements(), 1);
-
- return array;
-}
-
-} // namespace
-
-// static
-compiler::Node* FastCloneShallowArrayStub::Generate(
- CodeStubAssembler* assembler, compiler::Node* closure,
- compiler::Node* literal_index, compiler::Node* context,
- CodeStubAssembler::Label* call_runtime,
- AllocationSiteMode allocation_site_mode) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- typedef compiler::Node Node;
-
- Label zero_capacity(assembler), cow_elements(assembler),
- fast_elements(assembler), return_result(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
-
- assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
- allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
-
- Node* boilerplate = assembler->LoadObjectField(
- allocation_site, AllocationSite::kTransitionInfoOffset);
- Node* boilerplate_map = assembler->LoadMap(boilerplate);
- Node* boilerplate_elements = assembler->LoadElements(boilerplate);
- Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
- allocation_site =
- allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
-
- Node* zero = assembler->SmiConstant(Smi::kZero);
- assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
-
- Node* elements_map = assembler->LoadMap(boilerplate_elements);
- assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
-
- assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
- {
- assembler->Comment("fast double elements path");
- if (FLAG_debug_code) {
- Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
- &correct_elements_map, &abort);
-
- assembler->Bind(&abort);
- {
- Node* abort_id = assembler->SmiConstant(
- Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
- assembler->CallRuntime(Runtime::kAbort, context, abort_id);
- result.Bind(assembler->UndefinedConstant());
- assembler->Goto(&return_result);
- }
- assembler->Bind(&correct_elements_map);
- }
-
- Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
- boilerplate_elements, allocation_site,
- capacity, FAST_DOUBLE_ELEMENTS);
- result.Bind(array);
- assembler->Goto(&return_result);
+ assembler.CallRuntime(Runtime::kGetProperty, context, object, key));
+ assembler.Goto(&end);
}
- assembler->Bind(&fast_elements);
- {
- assembler->Comment("fast elements path");
- Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
- boilerplate_elements, allocation_site,
- capacity, FAST_ELEMENTS);
- result.Bind(array);
- assembler->Goto(&return_result);
- }
-
- Variable length(assembler, MachineRepresentation::kTagged),
- elements(assembler, MachineRepresentation::kTagged);
- Label allocate_without_elements(assembler);
-
- assembler->Bind(&cow_elements);
- {
- assembler->Comment("fixed cow path");
- length.Bind(assembler->LoadJSArrayLength(boilerplate));
- elements.Bind(boilerplate_elements);
-
- assembler->Goto(&allocate_without_elements);
- }
-
- assembler->Bind(&zero_capacity);
- {
- assembler->Comment("zero capacity path");
- length.Bind(zero);
- elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
-
- assembler->Goto(&allocate_without_elements);
- }
-
- assembler->Bind(&allocate_without_elements);
- {
- Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
- assembler->StoreObjectField(array, JSObject::kElementsOffset,
- elements.value());
- result.Bind(array);
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_result);
- return result.value();
-}
-
-void FastCloneShallowArrayStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- Node* closure = assembler->Parameter(Descriptor::kClosure);
- Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
- Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Label call_runtime(assembler, Label::kDeferred);
- assembler->Return(Generate(assembler, closure, literal_index, context,
- &call_runtime, allocation_site_mode()));
-
- assembler->Bind(&call_runtime);
- {
- assembler->Comment("call runtime");
- Node* flags = assembler->SmiConstant(
- Smi::FromInt(ArrayLiteral::kShallowElements |
- (allocation_site_mode() == TRACK_ALLOCATION_SITE
- ? 0
- : ArrayLiteral::kDisableMementos)));
- assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
- context, closure, literal_index,
- constant_elements, flags));
- }
+ assembler.Bind(&end);
+ assembler.Return(var_result.value());
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -2969,32 +1920,33 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
}
void StoreFastElementStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment(
+ assembler.Comment(
"StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
is_js_array(), ElementsKindToString(elements_kind()), store_mode());
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- assembler->EmitElementStore(receiver, key, value, is_js_array(),
- elements_kind(), store_mode(), &miss);
- assembler->Return(value);
+ assembler.EmitElementStore(receiver, key, value, is_js_array(),
+ elements_kind(), store_mode(), &miss);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
- slot, vector, receiver, key);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+ vector, receiver, key);
}
}
@@ -3078,50 +2030,55 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
}
void CreateAllocationSiteStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
- assembler->Parameter(Descriptor::kVector),
- assembler->Parameter(Descriptor::kSlot)));
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.CreateAllocationSiteInFeedbackVector(
+ assembler.Parameter(Descriptor::kVector),
+ assembler.Parameter(Descriptor::kSlot)));
}
-void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- assembler->Return(assembler->CreateWeakCellInFeedbackVector(
- assembler->Parameter(Descriptor::kVector),
- assembler->Parameter(Descriptor::kSlot),
- assembler->Parameter(Descriptor::kValue)));
+void CreateWeakCellStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.CreateWeakCellInFeedbackVector(
+ assembler.Parameter(Descriptor::kVector),
+ assembler.Parameter(Descriptor::kSlot),
+ assembler.Parameter(Descriptor::kValue)));
}
void ArrayNoArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* native_context = assembler->LoadObjectField(
- assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
+ CodeStubAssembler assembler(state);
+ Node* native_context = assembler.LoadObjectField(
+ assembler.Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
bool track_allocation_site =
AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
override_mode() != DISABLE_ALLOCATION_SITES;
- Node* allocation_site =
- track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
- : nullptr;
+ Node* allocation_site = track_allocation_site
+ ? assembler.Parameter(Descriptor::kAllocationSite)
+ : nullptr;
Node* array_map =
- assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
- Node* array = assembler->AllocateJSArray(
+ assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
+ Node* array = assembler.AllocateJSArray(
elements_kind(), array_map,
- assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::kZero), allocation_site);
- assembler->Return(array);
+ assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler.SmiConstant(Smi::kZero), allocation_site);
+ assembler.Return(array);
}
void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
Node* array_map =
- assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
- JSFunction::kPrototypeOrInitialMapOffset);
- Node* array = assembler->AllocateJSArray(
+ assembler.LoadObjectField(assembler.Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array = assembler.AllocateJSArray(
elements_kind(), array_map,
- assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::kZero), nullptr);
- assembler->Return(array);
+ assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler.SmiConstant(Smi::kZero));
+ assembler.Return(array);
}
namespace {
@@ -3191,49 +2148,52 @@ void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
} // namespace
void ArraySingleArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
+ CodeStubAssembler assembler(state);
+ Node* function = assembler.Parameter(Descriptor::kFunction);
Node* native_context =
- assembler->LoadObjectField(function, JSFunction::kContextOffset);
+ assembler.LoadObjectField(function, JSFunction::kContextOffset);
Node* array_map =
- assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSite::GetMode(elements_kind());
- Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
- SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
+ Node* allocation_site = assembler.Parameter(Descriptor::kAllocationSite);
+ SingleArgumentConstructorCommon<Descriptor>(&assembler, elements_kind(),
array_map, allocation_site, mode);
}
void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
- Node* array_map = assembler->LoadObjectField(
+ CodeStubAssembler assembler(state);
+ Node* function = assembler.Parameter(Descriptor::kFunction);
+ Node* array_map = assembler.LoadObjectField(
function, JSFunction::kPrototypeOrInitialMapOffset);
SingleArgumentConstructorCommon<Descriptor>(
- assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
+ &assembler, elements_kind(), array_map, assembler.UndefinedConstant(),
DONT_TRACK_ALLOCATION_SITE);
}
void GrowArrayElementsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- CodeStubAssembler::Label runtime(assembler,
+ CodeStubAssembler assembler(state);
+ CodeStubAssembler::Label runtime(&assembler,
CodeStubAssembler::Label::kDeferred);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
ElementsKind kind = elements_kind();
- Node* elements = assembler->LoadElements(object);
+ Node* elements = assembler.LoadElements(object);
Node* new_elements =
- assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
- assembler->Return(new_elements);
+ assembler.TryGrowElementsCapacity(object, elements, kind, key, &runtime);
+ assembler.Return(new_elements);
- assembler->Bind(&runtime);
+ assembler.Bind(&runtime);
// TODO(danno): Make this a tail call when the stub is only used from TurboFan
// code. This mustn't be a tail call for now, since the caller site in lithium
// creates a safepoint. This safepoint mustn't have a different number of
@@ -3243,8 +2203,8 @@ void GrowArrayElementsStub::GenerateAssembly(
// tail call pushing the arguments on the stack for the runtime call). By not
// tail-calling, the runtime call case also has zero arguments on the stack
// for the stub frame.
- assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
- object, key));
+ assembler.Return(
+ assembler.CallRuntime(Runtime::kGrowArrayElements, context, object, key));
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)