author    Michaël Zasso <targos@protonmail.com>  2017-02-14 11:27:26 +0100
committer Michaël Zasso <targos@protonmail.com>  2017-02-22 15:55:42 +0100
commit    7a77daf24344db7942e34c962b0f1ee729ab7af5 (patch)
tree      e7cbe7bf4e2f4b802a8f5bc18336c546cd6a0d7f /deps/v8/src/ic
parent    5f08871ee93ea739148cc49e0f7679e33c70295a (diff)
deps: update V8 to 5.6.326.55
PR-URL: https://github.com/nodejs/node/pull/10992
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8/src/ic')
-rw-r--r--  deps/v8/src/ic/access-compiler-data.h | 48
-rw-r--r--  deps/v8/src/ic/access-compiler.cc | 13
-rw-r--r--  deps/v8/src/ic/access-compiler.h | 12
-rw-r--r--  deps/v8/src/ic/arm/access-compiler-arm.cc | 22
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc | 73
-rw-r--r--  deps/v8/src/ic/arm/ic-arm.cc | 244
-rw-r--r--  deps/v8/src/ic/arm64/access-compiler-arm64.cc | 23
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 80
-rw-r--r--  deps/v8/src/ic/arm64/ic-arm64.cc | 271
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc | 64
-rw-r--r--  deps/v8/src/ic/handler-compiler.h | 15
-rw-r--r--  deps/v8/src/ic/handler-configuration-inl.h | 145
-rw-r--r--  deps/v8/src/ic/handler-configuration.h | 200
-rw-r--r--  deps/v8/src/ic/ia32/access-compiler-ia32.cc | 21
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 71
-rw-r--r--  deps/v8/src/ic/ia32/ic-ia32.cc | 244
-rw-r--r--  deps/v8/src/ic/ic-compiler.cc | 20
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 6
-rw-r--r--  deps/v8/src/ic/ic-state.cc | 10
-rw-r--r--  deps/v8/src/ic/ic-state.h | 10
-rw-r--r--  deps/v8/src/ic/ic.cc | 657
-rw-r--r--  deps/v8/src/ic/ic.h | 39
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 549
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.h | 23
-rw-r--r--  deps/v8/src/ic/mips/access-compiler-mips.cc | 22
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc | 69
-rw-r--r--  deps/v8/src/ic/mips/ic-mips.cc | 244
-rw-r--r--  deps/v8/src/ic/mips64/access-compiler-mips64.cc | 22
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 69
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc | 245
-rw-r--r--  deps/v8/src/ic/ppc/access-compiler-ppc.cc | 22
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 67
-rw-r--r--  deps/v8/src/ic/ppc/ic-ppc.cc | 250
-rw-r--r--  deps/v8/src/ic/s390/access-compiler-s390.cc | 21
-rw-r--r--  deps/v8/src/ic/s390/handler-compiler-s390.cc | 73
-rw-r--r--  deps/v8/src/ic/s390/ic-s390.cc | 242
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 44
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 6
-rw-r--r--  deps/v8/src/ic/x64/access-compiler-x64.cc | 23
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc | 71
-rw-r--r--  deps/v8/src/ic/x64/ic-x64.cc | 243
-rw-r--r--  deps/v8/src/ic/x87/access-compiler-x87.cc | 21
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc | 71
-rw-r--r--  deps/v8/src/ic/x87/ic-x87.cc | 244
44 files changed, 1979 insertions, 2950 deletions
diff --git a/deps/v8/src/ic/access-compiler-data.h b/deps/v8/src/ic/access-compiler-data.h
new file mode 100644
index 0000000000..dffcac7d05
--- /dev/null
+++ b/deps/v8/src/ic/access-compiler-data.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_ACCESS_COMPILER_DATA_H_
+#define V8_IC_ACCESS_COMPILER_DATA_H_
+
+#include <memory>
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class AccessCompilerData {
+ public:
+ AccessCompilerData() {}
+
+ bool IsInitialized() const { return load_calling_convention_ != nullptr; }
+ void Initialize(int load_register_count, const Register* load_registers,
+ int store_register_count, const Register* store_registers) {
+ load_calling_convention_.reset(NewArray<Register>(load_register_count));
+ for (int i = 0; i < load_register_count; ++i) {
+ load_calling_convention_[i] = load_registers[i];
+ }
+ store_calling_convention_.reset(NewArray<Register>(store_register_count));
+ for (int i = 0; i < store_register_count; ++i) {
+ store_calling_convention_[i] = store_registers[i];
+ }
+ }
+
+ Register* load_calling_convention() { return load_calling_convention_.get(); }
+ Register* store_calling_convention() {
+ return store_calling_convention_.get();
+ }
+
+ private:
+ std::unique_ptr<Register[]> load_calling_convention_;
+ std::unique_ptr<Register[]> store_calling_convention_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccessCompilerData);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_ACCESS_COMPILER_DATA_H_
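
The new AccessCompilerData moves the register arrays for the load/store calling conventions out of function-local statics (see the access-compiler.cc change below) and into per-isolate storage that is filled on first use. A minimal self-contained sketch of the container and its intended use, in plain C++ with a stand-in Register type rather than V8's real classes:

    #include <algorithm>
    #include <cassert>
    #include <memory>

    struct Register { int code; };  // stand-in for V8's Register

    class AccessCompilerDataSketch {
     public:
      bool IsInitialized() const { return load_ != nullptr; }
      void Initialize(int load_count, const Register* load_regs,
                      int store_count, const Register* store_regs) {
        // Copy both calling-convention tables into isolate-owned storage.
        load_.reset(new Register[load_count]);
        std::copy(load_regs, load_regs + load_count, load_.get());
        store_.reset(new Register[store_count]);
        std::copy(store_regs, store_regs + store_count, store_.get());
      }
      Register* load_calling_convention() { return load_.get(); }
      Register* store_calling_convention() { return store_.get(); }

     private:
      std::unique_ptr<Register[]> load_;
      std::unique_ptr<Register[]> store_;
    };

    int main() {
      AccessCompilerDataSketch data;
      assert(!data.IsInitialized());
      // receiver, name, scratch1, scratch2, scratch3 (codes arbitrary here).
      Register load[] = {{1}, {2}, {3}, {0}, {4}};
      Register store[] = {{1}, {2}, {3}, {4}};
      data.Initialize(5, load, 4, store);
      assert(data.IsInitialized());
      assert(data.load_calling_convention()[2].code == 3);
      return 0;
    }

Keeping the tables per isolate, rather than in process-wide function statics, avoids sharing mutable state between isolates.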
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index bb6b5e50d9..d92f9c0c53 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -4,7 +4,6 @@
#include "src/ic/access-compiler.h"
-
namespace v8 {
namespace internal {
@@ -42,13 +41,17 @@ void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
GenerateTailCall(masm, code);
}
-
-Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+Register* PropertyAccessCompiler::GetCallingConvention(Isolate* isolate,
+ Code::Kind kind) {
+ AccessCompilerData* data = isolate->access_compiler_data();
+ if (!data->IsInitialized()) {
+ InitializePlatformSpecific(data);
+ }
if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
- return load_calling_convention();
+ return data->load_calling_convention();
}
DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- return store_calling_convention();
+ return data->store_calling_convention();
}
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index ecc5c08a59..3d488e82ea 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -6,13 +6,13 @@
#define V8_IC_ACCESS_COMPILER_H_
#include "src/code-stubs.h"
+#include "src/ic/access-compiler-data.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
-
class PropertyAccessCompiler BASE_EMBEDDED {
public:
static Builtins::Name MissBuiltin(Code::Kind kind) {
@@ -36,7 +36,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
protected:
PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
CacheHolderFlag cache_holder)
- : registers_(GetCallingConvention(kind)),
+ : registers_(GetCallingConvention(isolate, kind)),
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
@@ -59,11 +59,6 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
- static Register* GetCallingConvention(Code::Kind);
- static Register* load_calling_convention();
- static Register* store_calling_convention();
- static Register* keyed_store_calling_convention();
-
Register* registers_;
static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
@@ -72,6 +67,9 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
private:
+ static Register* GetCallingConvention(Isolate* isolate, Code::Kind kind);
+ static void InitializePlatformSpecific(AccessCompilerData* data);
+
Code::Kind kind_;
CacheHolderFlag cache_holder_;
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 9ce485ed46..e501cdcc8b 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r0, r4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r3, r0, r4};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r4};
- return registers;
-}
+ Register store_registers[] = {receiver, name, r3, r4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 691fe3d23d..6145d43641 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -407,10 +407,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ ldr(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ b(eq, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ ldr(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ ldr(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ b(ne, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -429,17 +453,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ b(ne, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -449,46 +462,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -496,7 +491,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -507,7 +502,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
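
The GenerateAccessCheck added above emits, as ARM code, the comparison sketched below: accept when the execution-time native context is the expected one, or, unless compare_native_contexts_only is set, when the two contexts carry the same security token. A restatement in plain C++ with hypothetical types (the real code additionally jumps to miss through the weak cell when the expected context has been collected):

    #include <cassert>

    struct NativeContext { const void* security_token; };

    bool AccessCheckPasses(const NativeContext* current,
                           const NativeContext* expected,
                           bool compare_native_contexts_only) {
      if (current == expected) return true;            // same native context
      if (compare_native_contexts_only) return false;  // primitives: exact match
      return current->security_token == expected->security_token;
    }

    int main() {
      int token_a = 0, token_b = 0;
      NativeContext c1{&token_a}, c2{&token_a}, c3{&token_b};
      assert(AccessCheckPasses(&c1, &c1, /*compare_native_contexts_only=*/true));
      assert(!AccessCheckPasses(&c1, &c2, true));   // distinct contexts, exact mode
      assert(AccessCheckPasses(&c1, &c2, false));   // distinct contexts, same token
      assert(!AccessCheckPasses(&c1, &c3, false));  // different tokens
      return 0;
    }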
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 10ec578f7b..babf497a5b 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -126,138 +114,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(lo, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, Operand(0));
- __ b(lt, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ b(eq, &absent);
- __ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ b(lo, slow);
- __ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ b(ne, slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ b(eq, &check_prototypes);
- __ mov(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ b(hi, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ b(eq, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ tst(hash, Operand(kIsNotInternalizedMask));
- __ b(ne, not_unique);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -340,106 +196,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r2));
- DCHECK(receiver.is(r1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r0, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
- r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ SmiUntag(r0, key);
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r2 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
- r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r4, r5,
- r6, r9);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r3: elements
- __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r4, r3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
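
For reference, the GenerateFastArrayLoad removed above implemented a fast path for out-of-bounds keyed loads: undefined may be returned without calling the runtime only while every object on the prototype chain stays "boring" (negative keys always went to the slow path). A condensed restatement of that bail-out condition in plain C++ with hypothetical types (the original emits ARM assembly and rechecks the chain whenever it loads the hole):

    #include <cassert>

    struct JSObjectLike {
      const JSObjectLike* prototype;  // nullptr stands in for null
      bool is_fast_js_object;         // instance type >= JS_OBJECT_TYPE
      bool needs_access_check;
      bool has_indexed_interceptor;
      bool elements_empty;            // elements == empty_fixed_array
    };

    // An OOB index may yield undefined only if every prototype is a fast
    // JSObject with no access check, no indexed interceptor, and empty
    // elements; anything else must take the slow path (false here).
    bool OutOfBoundsLoadsUndefined(const JSObjectLike* receiver) {
      for (const JSObjectLike* p = receiver->prototype; p != nullptr;
           p = p->prototype) {
        if (!p->is_fast_js_object || p->needs_access_check ||
            p->has_indexed_interceptor || !p->elements_empty) {
          return false;
        }
      }
      return true;
    }

    int main() {
      JSObjectLike object_proto{nullptr, true, false, false, true};
      JSObjectLike arr{&object_proto, true, false, false, true};
      assert(OutOfBoundsLoadsUndefined(&arr));
      object_proto.elements_empty = false;  // e.g. an indexed property appeared
      assert(!OutOfBoundsLoadsUndefined(&arr));
      return 0;
    }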
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 6273633822..8cbb5278ea 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -25,23 +25,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
// registers are actually scratch registers, and which are important. For now,
// we use the same assignments as ARM to remain on the safe side.
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x0, x4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, x3, x0, x4};
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x4};
- return registers;
-}
+ // Store calling convention.
+ // receiver, name, scratch1, scratch2.
+ Register store_registers[] = {receiver, name, x3, x4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 3f97fddcd5..58d0bb7446 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -437,10 +437,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ Ldr(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ Cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ B(eq, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ Ldr(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ Ldr(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ Cmp(scratch1, scratch2);
+ }
+ __ B(ne, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -454,19 +478,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
__ Mov(scratch1, Operand(validity_cell));
__ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- __ B(ne, miss);
- }
-
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ B(ne, miss);
+ // Compare scratch1 against Map::kPrototypeChainValid.
+ static_assert(Map::kPrototypeChainValid == 0,
+ "Map::kPrototypeChainValid has unexpected value");
+ __ Cbnz(scratch1, miss);
}
// Keep track of the current object in register reg.
@@ -478,46 +493,27 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- UseScratchRegisterScope temps(masm());
- __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
name) == NameDictionary::kNotFound));
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -525,7 +521,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -536,7 +532,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
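
The arm64 version also picks up a small peephole, visible in the validity-cell hunk above: because Map::kPrototypeChainValid is the Smi zero, "compare against the constant, branch if not equal" collapses into a single compare-and-branch-on-nonzero (Cbnz). A sketch of the invariant being relied on, in plain C++ (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Mirrors Map::kPrototypeChainValid; the static_assert pins down the
    // assumption that makes the zero-compare shortcut sound.
    constexpr intptr_t kPrototypeChainValid = 0;
    static_assert(kPrototypeChainValid == 0,
                  "Map::kPrototypeChainValid has unexpected value");

    bool PrototypeChainStillValid(intptr_t validity_cell_value) {
      // Equivalent of "cmp reg, #expected; b.ne miss", reduced to
      // "cbnz reg, miss" because the expected value is zero.
      return validity_cell_value == 0;
    }

    int main() {
      assert(PrototypeChainStillValid(0));
      assert(!PrototypeChainStillValid(3));
      return 0;
    }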
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index fa9d7c16b7..0ced207d8a 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -15,18 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-// "type" holds an instance type on entry and is not clobbered.
-// Generated code branches on "global_object" if type is any kind of global
-// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
- __ B(eq, global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -116,144 +104,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object and returns the map of the
-// receiver in 'map_scratch' if the receiver is not a SMI.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map_scratch,
- Register scratch,
- int interceptor_bit, Label* slow) {
- DCHECK(!AreAliased(map_scratch, scratch));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
- __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch, interceptor_bit, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object, we enter the
- // runtime system to make sure that indexing into string objects work
- // as intended.
- STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- __ Cmp(scratch, JS_OBJECT_TYPE);
- __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
-//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver and its prototypes. Clobbered.
-//
-// result - holds the result on exit if the load succeeded.
-// Allowed to be the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
-
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- // Check for fast array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(key, scratch1);
- __ B(lo, &in_bounds);
-
- // Out of bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ Cmp(key, Operand(Smi::FromInt(0)));
- __ B(lt, slow); // Negative keys can't take the fast OOB path.
- __ Bind(&check_prototypes);
- __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Bind(&check_next_prototype);
- __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
- __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ B(lo, slow);
- __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
- __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
- __ B(&check_next_prototype);
-
- __ Bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
-
- __ Bind(&in_bounds);
- // Fast case: Do the load.
- __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);
-
- // Move the value to the result register.
- // 'result' can alias with 'receiver' or 'key' but these two must be
- // preserved if we jump to 'slow'.
- __ Mov(result, scratch2);
- __ Bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is done the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map_scratch, Register hash_scratch,
- Label* index_string, Label* not_unique) {
- DCHECK(!AreAliased(key, map_scratch, hash_scratch));
-
- // Is the key a name?
- Label unique;
- __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
- not_unique, hi);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ B(eq, &unique);
-
- // Is the string an array index with cached numeric value?
- __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
- index_string);
-
- // Is the string internalized? We know it's a string, so a single bit test is
- // enough.
- __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
- __ Bind(&unique);
- // Fall through if the key is a unique name.
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -323,127 +173,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
- Register receiver, Register scratch1,
- Register scratch2, Register scratch3,
- Register scratch4, Register scratch5,
- Label* slow) {
- DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
- scratch5));
-
- Isolate* isolate = masm->isolate();
- Label check_number_dictionary;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
- Map::kHasIndexedInterceptor, slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
- result, slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
- scratch1, scratch2);
- __ Ret();
-
- __ Bind(&check_number_dictionary);
- __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
- // Check whether we have a number dictionary.
- __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
- __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
- scratch4, scratch5);
- __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
- Register receiver, Register scratch1,
- Register scratch2, Register scratch3,
- Register scratch4, Register scratch5,
- Label* slow) {
- DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
- scratch5));
-
- Isolate* isolate = masm->isolate();
- Label probe_dictionary, property_array_property;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
- Map::kHasNamedInterceptor, slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, key, scratch1, scratch2, scratch3, scratch4);
- // Cache miss.
- KeyedLoadIC::GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it exists.
- __ Bind(&probe_dictionary);
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
- // Load the property.
- GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- scratch1, scratch2);
- __ Ret();
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(x2));
- DCHECK(receiver.is(x1));
-
- __ JumpIfNotSmi(key, &check_name);
- __ Bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- // Slow case.
- __ Bind(&slow);
- __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
- 1, x4, x3);
- GenerateRuntimeGetProperty(masm);
-
- __ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
-
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);
-
- __ Bind(&index_name);
- __ IndexFromHash(x3, key);
- // Now jump to the place where smi keys are handled.
- __ B(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 3b2e115b4f..05e9031915 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -6,7 +6,7 @@
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
-#include "src/ic/handler-configuration.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -65,7 +65,10 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
// name specific if there are global objects involved.
Handle<Code> handler = PropertyHandlerCompiler::Find(
cache_name, stub_holder_map, Code::LOAD_IC, flag);
- if (!handler.is_null()) return handler;
+ if (!handler.is_null()) {
+ TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
+ return handler;
+ }
TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
@@ -95,24 +98,23 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Name> name,
Label* miss,
ReturnHolder return_what) {
- PrototypeCheckType check_type = SKIP_RECEIVER;
- int function_index = map()->IsPrimitiveMap()
- ? map()->GetConstructorFunctionIndex()
- : Map::kNoConstructorFunctionIndex;
- if (function_index != Map::kNoConstructorFunctionIndex) {
- GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
- scratch1(), miss);
- Object* function = isolate()->native_context()->get(function_index);
- Object* prototype = JSFunction::cast(function)->instance_prototype();
- Handle<Map> map(JSObject::cast(prototype)->map());
- set_map(map);
- object_reg = scratch1();
- check_type = CHECK_ALL_MAPS;
+ if (map()->IsPrimitiveMap() || map()->IsJSGlobalProxyMap()) {
+ // If the receiver is a global proxy and if we get to this point then
+ // the compile-time (current) native context has access to global proxy's
+ // native context. Since access rights revocation is not supported at all,
+ // we can generate a check that an execution-time native context is either
+ // the same as compile-time native context or has the same access token.
+ Handle<Context> native_context = isolate()->native_context();
+ Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+
+ bool compare_native_contexts_only = map()->IsPrimitiveMap();
+ GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss,
+ compare_native_contexts_only);
}
// Check that the maps starting from the prototype haven't changed.
return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
- miss, check_type, return_what);
+ miss, return_what);
}
@@ -122,8 +124,14 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Name> name,
Label* miss,
ReturnHolder return_what) {
+ if (map()->IsJSGlobalProxyMap()) {
+ Handle<Context> native_context = isolate()->native_context();
+ Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+ GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss, false);
+ }
+
return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
- miss, SKIP_RECEIVER, return_what);
+ miss, return_what);
}
@@ -224,7 +232,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register reg = Frontend(name);
@@ -236,7 +244,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization,
int accessor_index, Handle<Code> slow_stub) {
DCHECK(call_optimization.is_simple_api_call());
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register holder = Frontend(name);
@@ -590,7 +598,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization, int accessor_index,
Handle<Code> slow_stub) {
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register holder = Frontend(name);
@@ -633,11 +641,9 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
if (FLAG_tf_load_ic_stub) {
- int config = KeyedLoadElementsKind::encode(elements_kind) |
- KeyedLoadConvertHole::encode(false) |
- KeyedLoadIsJsArray::encode(is_js_array) |
- LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
- return handle(Smi::FromInt(config), isolate);
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind, false,
+ is_js_array);
}
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
return LoadDictionaryElementStub(isolate).GetCode();
@@ -649,11 +655,9 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
*receiver_map == isolate->get_initial_js_array_map(elements_kind);
if (FLAG_tf_load_ic_stub) {
- int config = KeyedLoadElementsKind::encode(elements_kind) |
- KeyedLoadConvertHole::encode(convert_hole_to_undefined) |
- KeyedLoadIsJsArray::encode(is_js_array) |
- LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
- return handle(Smi::FromInt(config), isolate);
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind,
+ convert_hole_to_undefined, is_js_array);
} else {
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
return LoadFastElementStub(isolate, is_js_array, elements_kind,
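
GetKeyedLoadHandler now delegates the bit packing to LoadHandler::LoadElement instead of assembling the config Smi inline. The sketch below shows that style of packing with a simplified BitField analogue; the field order mirrors the elements-kind layout declared in handler-configuration.h further below, while the ElementsKind value used is illustrative only:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for V8's BitField template.
    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static constexpr int kNext = kShift + kBits;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t config) {
        return static_cast<T>((config & kMask) >> kShift);
      }
    };

    enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
    using KindBits = BitField<Kind, 0, 2>;
    using IsJsArrayBits = BitField<bool, KindBits::kNext, 1>;
    using ConvertHoleBits = BitField<bool, IsJsArrayBits::kNext, 1>;
    using ElementsKindBits = BitField<int, ConvertHoleBits::kNext, 8>;

    int main() {
      const int kSomeElementsKind = 3;  // illustrative value only
      uint32_t config = KindBits::encode(kForElements) |
                        IsJsArrayBits::encode(true) |
                        ConvertHoleBits::encode(true) |
                        ElementsKindBits::encode(kSomeElementsKind);
      // The packed word round-trips: each field decodes independently.
      assert(KindBits::decode(config) == kForElements);
      assert(IsJsArrayBits::decode(config));
      assert(ConvertHoleBits::decode(config));
      assert(ElementsKindBits::decode(config) == kSomeElementsKind);
      return 0;
    }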
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 63ca050ca2..0dec36af2f 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -13,7 +13,6 @@ namespace internal {
class CallOptimization;
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
enum ReturnHolder { RETURN_HOLDER, DONT_RETURN_ANYTHING };
class PropertyHandlerCompiler : public PropertyAccessCompiler {
@@ -84,6 +83,18 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Handle<Name> name, Register scratch,
Label* miss);
+ // Generates check that current native context has the same access rights
+ // as the given |native_context_cell|.
+ // If |compare_native_contexts_only| is true then access check is considered
+ // passed if the execution-time native context is equal to contents of
+ // |native_context_cell|.
+ // If |compare_native_contexts_only| is false then access check is considered
+ // passed if the execution-time native context is equal to contents of
+ // |native_context_cell| or security tokens of both contexts are equal.
+ void GenerateAccessCheck(Handle<WeakCell> native_context_cell,
+ Register scratch1, Register scratch2, Label* miss,
+ bool compare_native_contexts_only);
+
// Generates code that verifies that the property holder has not changed
// (checking maps of objects in the prototype chain for fast and global
// objects or doing negative lookup for slow objects, ensures that the
@@ -99,7 +110,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Register CheckPrototypes(Register object_reg, Register holder_reg,
Register scratch1, Register scratch2,
Handle<Name> name, Label* miss,
- PrototypeCheckType check, ReturnHolder return_what);
+ ReturnHolder return_what);
Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
void set_holder(Handle<JSObject> holder) { holder_ = holder; }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
new file mode 100644
index 0000000000..505d67cf42
--- /dev/null
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_CONFIGURATION_INL_H_
+#define V8_IC_HANDLER_CONFIGURATION_INL_H_
+
+#include "src/ic/handler-configuration.h"
+
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> LoadHandler::LoadField(Isolate* isolate,
+ FieldIndex field_index) {
+ int config = KindBits::encode(kForFields) |
+ IsInobjectBits::encode(field_index.is_inobject()) |
+ IsDoubleBits::encode(field_index.is_double()) |
+ FieldOffsetBits::encode(field_index.offset());
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadConstant(Isolate* isolate, int descriptor) {
+ int config = KindBits::encode(kForConstants) |
+ IsAccessorInfoBits::encode(false) |
+ DescriptorValueIndexBits::encode(
+ DescriptorArray::ToValueIndex(descriptor));
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadApiGetter(Isolate* isolate, int descriptor) {
+ int config = KindBits::encode(kForConstants) |
+ IsAccessorInfoBits::encode(true) |
+ DescriptorValueIndexBits::encode(
+ DescriptorArray::ToValueIndex(descriptor));
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler) {
+ int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+ Kind kind = KindBits::decode(config);
+ DCHECK_NE(kForElements, kind);
+#endif
+ config = DoAccessCheckOnReceiverBits::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableNegativeLookupOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler) {
+ int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+ Kind kind = KindBits::decode(config);
+ DCHECK_NE(kForElements, kind);
+#endif
+ config = DoNegativeLookupOnReceiverBits::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadNonExistent(
+ Isolate* isolate, bool do_negative_lookup_on_receiver) {
+ int config =
+ KindBits::encode(kForNonExistent) |
+ DoNegativeLookupOnReceiverBits::encode(do_negative_lookup_on_receiver);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadElement(Isolate* isolate,
+ ElementsKind elements_kind,
+ bool convert_hole_to_undefined,
+ bool is_js_array) {
+ int config = KindBits::encode(kForElements) |
+ ElementsKindBits::encode(elements_kind) |
+ ConvertHoleBits::encode(convert_hole_to_undefined) |
+ IsJsArrayBits::encode(is_js_array);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
+ int descriptor, FieldIndex field_index,
+ Representation representation,
+ bool extend_storage) {
+ StoreHandler::FieldRepresentation field_rep;
+ switch (representation.kind()) {
+ case Representation::kSmi:
+ field_rep = StoreHandler::kSmi;
+ break;
+ case Representation::kDouble:
+ field_rep = StoreHandler::kDouble;
+ break;
+ case Representation::kHeapObject:
+ field_rep = StoreHandler::kHeapObject;
+ break;
+ case Representation::kTagged:
+ field_rep = StoreHandler::kTagged;
+ break;
+ default:
+ UNREACHABLE();
+ return Handle<Object>::null();
+ }
+ int value_index = DescriptorArray::ToValueIndex(descriptor);
+
+ DCHECK(kind == kStoreField || kind == kTransitionToField);
+ DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+
+ int config = StoreHandler::KindBits::encode(kind) |
+ StoreHandler::ExtendStorageBits::encode(extend_storage) |
+ StoreHandler::IsInobjectBits::encode(field_index.is_inobject()) |
+ StoreHandler::FieldRepresentationBits::encode(field_rep) |
+ StoreHandler::DescriptorValueIndexBits::encode(value_index) |
+ StoreHandler::FieldOffsetBits::encode(field_index.offset());
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation) {
+ return StoreField(isolate, kStoreField, descriptor, field_index,
+ representation, false);
+}
+
+Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage) {
+ return StoreField(isolate, kTransitionToField, descriptor, field_index,
+ representation, extend_storage);
+}
+
+Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
+ int descriptor) {
+ int value_index = DescriptorArray::ToValueIndex(descriptor);
+ int config =
+ StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
+ StoreHandler::DescriptorValueIndexBits::encode(value_index);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_HANDLER_CONFIGURATION_INL_H_
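
The helpers above all follow one pattern: pack a handful of facts into a Smi via BitField::encode, then flip individual flags later with BitField::update, which is what EnableAccessCheckOnReceiver and EnableNegativeLookupOnReceiver do. A sketch of the update step, reusing the same simplified BitField analogue as the earlier elements example (field positions match the declarations in handler-configuration.h; everything else is a stand-in):

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static constexpr int kNext = kShift + kBits;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t config) {
        return static_cast<T>((config & kMask) >> kShift);
      }
      // Re-encode one field, leaving the rest of the word untouched.
      static uint32_t update(uint32_t config, T value) {
        return (config & ~kMask) | encode(value);
      }
    };

    enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
    using KindBits = BitField<Kind, 0, 2>;
    using DoAccessCheckOnReceiverBits = BitField<bool, KindBits::kNext, 1>;
    using DoNegativeLookupOnReceiverBits =
        BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1>;

    int main() {
      // A LoadNonExistent-style config...
      uint32_t config = KindBits::encode(kForNonExistent) |
                        DoNegativeLookupOnReceiverBits::encode(true);
      assert(!DoAccessCheckOnReceiverBits::decode(config));
      // ...later upgraded the way EnableAccessCheckOnReceiver does it.
      config = DoAccessCheckOnReceiverBits::update(config, true);
      assert(DoAccessCheckOnReceiverBits::decode(config));
      assert(KindBits::decode(config) == kForNonExistent);  // unchanged
      return 0;
    }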
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index bf7c4770b9..a5291736dc 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -6,38 +6,196 @@
#define V8_IC_HANDLER_CONFIGURATION_H_
#include "src/elements-kind.h"
+#include "src/field-index.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-enum LoadHandlerType {
- kLoadICHandlerForElements = 0,
- kLoadICHandlerForProperties = 1
-};
+// A set of bit fields representing Smi handlers for loads.
+class LoadHandler {
+ public:
+ enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
+ class KindBits : public BitField<Kind, 0, 2> {};
+
+ // Defines whether access rights check should be done on receiver object.
+ // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+ // loading value from prototype chain. Ignored when loading from holder.
+ class DoAccessCheckOnReceiverBits
+ : public BitField<bool, KindBits::kNext, 1> {};
+
+ // Defines whether negative lookup check should be done on receiver object.
+ // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+ // loading value from prototype chain. Ignored when loading from holder.
+ class DoNegativeLookupOnReceiverBits
+ : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+
+ //
+ // Encoding when KindBits contains kForConstants.
+ //
+
+ class IsAccessorInfoBits
+ : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+ // Index of a value entry in the descriptor array.
+ // +2 here is because each descriptor entry occupies 3 slots in array.
+ class DescriptorValueIndexBits
+ : public BitField<unsigned, IsAccessorInfoBits::kNext,
+ kDescriptorIndexBitCount + 2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kForFields.
+ //
+ class IsInobjectBits
+ : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+ class IsDoubleBits : public BitField<bool, IsInobjectBits::kNext, 1> {};
+ // +1 here is to cover all possible JSObject header sizes.
+ class FieldOffsetBits
+ : public BitField<unsigned, IsDoubleBits::kNext,
+ kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kForElements.
+ //
+ class IsJsArrayBits : public BitField<bool, KindBits::kNext, 1> {};
+ class ConvertHoleBits : public BitField<bool, IsJsArrayBits::kNext, 1> {};
+ class ElementsKindBits
+ : public BitField<ElementsKind, ConvertHoleBits::kNext, 8> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(ElementsKindBits::kNext <= kSmiValueSize);
+
+ // The layout of a Tuple3 handler representing a load of a field from a
+ // prototype when prototype chain checks do not include non-existent lookups
+ // or access checks.
+ static const int kHolderCellOffset = Tuple3::kValue1Offset;
+ static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+ static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+ // The layout of an array handler representing a load of a field from a
+ // prototype when prototype chain checks include non-existent lookups and
+ // access checks.
+ static const int kSmiHandlerIndex = 0;
+ static const int kValidityCellIndex = 1;
+ static const int kHolderCellIndex = 2;
+ static const int kFirstPrototypeIndex = 3;
+
+ // Creates a Smi-handler for loading a field from fast object.
+ static inline Handle<Object> LoadField(Isolate* isolate,
+ FieldIndex field_index);
+
+ // Creates a Smi-handler for loading a constant from fast object.
+ static inline Handle<Object> LoadConstant(Isolate* isolate, int descriptor);
+
+ // Creates a Smi-handler for loading an Api getter property from fast object.
+ static inline Handle<Object> LoadApiGetter(Isolate* isolate, int descriptor);
+
+ // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
+ // check is a part of a prototype chain check.
+ static inline Handle<Object> EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler);
-class LoadHandlerTypeBit : public BitField<bool, 0, 1> {};
+ // Sets DoNegativeLookupOnReceiverBits in given Smi-handler. The receiver
+ // check is a part of a prototype chain check.
+ static inline Handle<Object> EnableNegativeLookupOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler);
-// Encoding for configuration Smis for property loads:
-class FieldOffsetIsInobject
- : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {};
-class FieldOffsetIsDouble
- : public BitField<bool, FieldOffsetIsInobject::kNext, 1> {};
-class FieldOffsetOffset : public BitField<int, FieldOffsetIsDouble::kNext, 27> {
+ // Creates a Smi-handler for loading a non-existent property. Works only as
+ // a part of prototype chain check.
+ static inline Handle<Object> LoadNonExistent(
+ Isolate* isolate, bool do_negative_lookup_on_receiver);
+
+ // Creates a Smi-handler for loading an element.
+ static inline Handle<Object> LoadElement(Isolate* isolate,
+ ElementsKind elements_kind,
+ bool convert_hole_to_undefined,
+ bool is_js_array);
};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(FieldOffsetOffset::kNext <= kSmiValueSize - 1);
-// Encoding for configuration Smis for elements loads:
-class KeyedLoadIsJsArray : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {
+// A set of bit fields representing Smi handlers for stores.
+class StoreHandler {
+ public:
+ enum Kind {
+ kStoreElement,
+ kStoreField,
+ kTransitionToField,
+ kTransitionToConstant
+ };
+ class KindBits : public BitField<Kind, 0, 2> {};
+
+ enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
+
+ // Applicable to kStoreField, kTransitionToField and kTransitionToConstant
+ // kinds.
+
+ // Index of a value entry in the descriptor array.
+ // The +2 is because each descriptor entry occupies 3 slots in the array.
+ class DescriptorValueIndexBits
+ : public BitField<unsigned, KindBits::kNext,
+ kDescriptorIndexBitCount + 2> {};
+ //
+ // Encoding when KindBits contains kTransitionToConstant.
+ //
+
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kStoreField or kTransitionToField.
+ //
+ class ExtendStorageBits
+ : public BitField<bool, DescriptorValueIndexBits::kNext, 1> {};
+ class IsInobjectBits : public BitField<bool, ExtendStorageBits::kNext, 1> {};
+ class FieldRepresentationBits
+ : public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
+ // +1 here is to cover all possible JSObject header sizes.
+ class FieldOffsetBits
+ : public BitField<unsigned, FieldRepresentationBits::kNext,
+ kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+ // The layout of a Tuple3 handler representing a transitioning store
+ // when prototype chain checks do not include non-existent lookups or access
+ // checks.
+ static const int kTransitionCellOffset = Tuple3::kValue1Offset;
+ static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+ static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+ // The layout of an array handler representing a transitioning store when
+ // prototype chain checks include non-existent lookups and access checks.
+ static const int kSmiHandlerIndex = 0;
+ static const int kValidityCellIndex = 1;
+ static const int kTransitionCellIndex = 2;
+ static const int kFirstPrototypeIndex = 3;
+
+ // Creates a Smi-handler for storing a field to fast object.
+ static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation);
+
+ // Creates a Smi-handler for transitioning store to a field.
+ static inline Handle<Object> TransitionToField(Isolate* isolate,
+ int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage);
+
+ // Creates a Smi-handler for transitioning store to a constant field (in this
+ // case the only thing that needs to be done is an update of a map).
+ static inline Handle<Object> TransitionToConstant(Isolate* isolate,
+ int descriptor);
+
+ private:
+ static inline Handle<Object> StoreField(Isolate* isolate, Kind kind,
+ int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage);
};
-class KeyedLoadConvertHole
- : public BitField<bool, KeyedLoadIsJsArray::kNext, 1> {};
-class KeyedLoadElementsKind
- : public BitField<ElementsKind, KeyedLoadConvertHole::kNext, 8> {};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(KeyedLoadElementsKind::kNext <= kSmiValueSize - 1);
} // namespace internal
} // namespace v8
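
The STATIC_ASSERTs that close each encoding section above guard against a subtle portability bug: the packed config must fit in a Smi, whose payload is only 31 bits on 32-bit targets. A compilable sketch of that guard, with stand-in constants and field widths:

#include <cstdint>

constexpr int kSmiValueSize = 31;  // 32-bit V8; 32 on 64-bit builds

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int kNext = kShift + kSize;
};

using KindBits = BitField<int, 0, 2>;
using IsJsArrayBits = BitField<bool, KindBits::kNext, 1>;
using ConvertHoleBits = BitField<bool, IsJsArrayBits::kNext, 1>;
using ElementsKindBits = BitField<int, ConvertHoleBits::kNext, 8>;

// Fails to compile if anyone widens a field past the Smi payload.
static_assert(ElementsKindBits::kNext <= kSmiValueSize,
              "LoadHandler config must fit in a Smi");

int main() { return ElementsKindBits::kNext; }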
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 3219f3d1cb..411c744659 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -16,22 +16,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, ebx, eax, edi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi};
- return registers;
+ Register store_registers[] = {receiver, name, ebx, edi};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
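
The ia32 hunk above is one instance of a cross-platform refactoring: instead of each architecture returning pointers to function-local static register arrays, every platform now copies its load/store calling conventions into a shared AccessCompilerData. A rough sketch of that shape, with a simplified Register type and an assumed Initialize signature:

#include <cstring>

struct Register { int code; };

class AccessCompilerData {
 public:
  void Initialize(int load_count, const Register* loads,
                  int store_count, const Register* stores) {
    load_count_ = load_count;
    store_count_ = store_count;
    std::memcpy(loads_, loads, load_count * sizeof(Register));
    std::memcpy(stores_, stores, store_count * sizeof(Register));
  }
  const Register* load_calling_convention() const { return loads_; }
  const Register* store_calling_convention() const { return stores_; }

 private:
  Register loads_[8];
  Register stores_[8];
  int load_count_ = 0;
  int store_count_ = 0;
};

int main() {
  // ia32 conventions from the hunk above: receiver, name, ebx, eax, edi
  // (register codes here are placeholders).
  Register load_regs[] = {{2}, {1}, {3}, {0}, {7}};
  Register store_regs[] = {{2}, {1}, {3}, {7}};
  AccessCompilerData data;
  data.Initialize(5, load_regs, 4, store_regs);
  return data.load_calling_convention()[0].code;
}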
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 06c58b8aae..68fd1b9d98 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -411,10 +411,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ mov(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -433,17 +455,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -453,46 +464,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -500,7 +493,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -511,7 +504,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
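
In C++ terms, the access check emitted by GenerateAccessCheck above behaves roughly like the function below: accept immediately when the current and expected native contexts match, and otherwise, unless the caller asked for a contexts-only comparison, fall back to comparing security tokens. The Context type and its fields are hypothetical stand-ins for the real heap layout:

struct Context {
  const Context* native_context;
  const void* security_token;
};

// Returns true on "miss" (access must go through the slow path).
bool AccessCheckMisses(const Context* current, const Context* expected,
                       bool compare_native_contexts_only) {
  if (current == expected) return false;        // same native context: done
  if (compare_native_contexts_only) return true;
  // Different contexts may still share a security token (e.g. same origin).
  return current->security_token != expected->security_token;
}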
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index b7496d4624..44a5b9f531 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register scratch,
- Register scratch2, Register result,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // scratch2 - holds maps and prototypes during prototype chain check.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch);
-
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(below, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, 0);
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &absent);
- __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
- __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
- // scratch: elements of current prototype
- // scratch2: map of current prototype
- __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, key);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // ebx: untagged index
- // eax: elements
- __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(receiver);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(receiver);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(receiver);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(isolate);
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
- edi);
-
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
-
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
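
The megamorphic keyed-load fast path deleted above encoded a non-obvious out-of-bounds rule: a negative index always goes to the slow path, while an index past the backing-store length may still yield undefined, but only after walking the prototype chain and proving every prototype is a plain JS object with empty elements. A hedged restatement with hypothetical types (the hole check and instance-type tests are simplified into one flag):

#include <vector>

struct Proto {
  std::vector<int> elements;   // backing store; empty plays empty_fixed_array
  bool needs_slow_path;        // interceptor, access check, or exotic type
  const Proto* prototype;      // nullptr plays the role of null_value
};

enum class Result { kValue, kUndefined, kSlow };

Result FastArrayLoad(const Proto* receiver, long key, int* out) {
  if (key < 0) return Result::kSlow;  // negative keys never take the fast OOB path
  if (key < static_cast<long>(receiver->elements.size())) {
    *out = receiver->elements[key];   // the real code also re-checks for the hole
    return Result::kValue;
  }
  // Out of bounds: undefined is only safe if every prototype is a plain
  // JS object with empty elements.
  for (const Proto* p = receiver->prototype; p != nullptr; p = p->prototype) {
    if (p->needs_slow_path || !p->elements.empty()) return Result::kSlow;
  }
  return Result::kUndefined;
}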
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 2f0633e0d8..750c88daa9 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -56,9 +56,11 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
// Tracking to do a better job of ensuring the data types are what they need
// to be. Not all the elements are in place yet; pessimistic elements
// transitions are still important for performance.
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
if (!transitioned_map.is_null()) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ TRACE_HANDLER_STATS(isolate(),
+ KeyedStoreIC_ElementsTransitionAndStoreStub);
cached_stub =
ElementsTransitionAndStoreStub(isolate(), elements_kind,
transitioned_map->elements_kind(),
@@ -66,19 +68,11 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
- if (IsSloppyArgumentsElements(elements_kind)) {
- cached_stub =
- KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
- } else if (receiver_map->has_fast_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- cached_stub = StoreFastElementStub(isolate(), is_js_array,
- elements_kind, store_mode).GetCode();
- } else {
- cached_stub =
- StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
- }
+ cached_stub =
+ CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
}
DCHECK(!cached_stub.is_null());
handlers->Add(cached_stub);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 4fc8ada8df..1b5d063270 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -92,6 +92,12 @@ Code* IC::target() const {
return GetTargetAtAddress(address(), constant_pool());
}
+bool IC::IsHandler(Object* object) {
+ return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
+ object->IsFixedArray() ||
+ (object->IsCode() && Code::cast(object)->is_handler());
+}
+
Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder, Isolate* isolate,
CacheHolderFlag* flag) {
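
IC::IsHandler above admits four handler representations. A sketch of a classifier over those shapes; the boolean-flag object model below is a simplification of V8's tagged pointers (where a Smi is recognized by tag bits, which is why the real predicate needs the nullptr guard):

enum class Shape { kSmiConfig, kTuple3, kFixedArray, kCodeStub, kNotAHandler };

struct TaggedObject {
  bool is_smi, is_tuple3, is_fixed_array, is_handler_code;
};

Shape ClassifyHandler(const TaggedObject* object) {
  if (object == nullptr) return Shape::kNotAHandler;
  if (object->is_smi) return Shape::kSmiConfig;           // bit-packed config
  if (object->is_tuple3) return Shape::kTuple3;           // cell+smi+validity
  if (object->is_fixed_array) return Shape::kFixedArray;  // + prototype cells
  if (object->is_handler_code) return Shape::kCodeStub;   // compiled handler
  return Shape::kNotAHandler;
}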
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index ea1f16c824..f94803681b 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -17,7 +17,7 @@ void ICUtility::Clear(Isolate* isolate, Address address,
std::ostream& operator<<(std::ostream& os, const CallICState& s) {
- return os << "(args(" << s.argc() << "), " << s.convert_mode() << ", ";
+ return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
}
@@ -256,10 +256,10 @@ void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
if (old_extra_ic_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined(isolate_) || left->IsBoolean()) {
+ if (left->IsOddball()) {
left_kind_ = GENERIC;
} else {
- DCHECK(right->IsUndefined(isolate_) || right->IsBoolean());
+ DCHECK(right->IsOddball());
right_kind_ = GENERIC;
}
}
@@ -270,8 +270,8 @@ BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
Kind kind) const {
Kind new_kind = GENERIC;
bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsBoolean() && is_truncating) {
- // Booleans will be automatically truncated by HChange.
+ if (object->IsOddball() && is_truncating) {
+ // Oddballs will be automatically truncated by HChange.
new_kind = INT32;
} else if (object->IsUndefined(isolate_)) {
// Undefined will be automatically truncated by HChange.
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 38be57ac04..1ba37b99db 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -26,10 +26,8 @@ class CallICState final BASE_EMBEDDED {
public:
explicit CallICState(ExtraICState extra_ic_state)
: bit_field_(extra_ic_state) {}
- CallICState(int argc, ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode)
- : bit_field_(ArgcBits::encode(argc) |
- ConvertModeBits::encode(convert_mode) |
+ CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
+ : bit_field_(ConvertModeBits::encode(convert_mode) |
TailCallModeBits::encode(tail_call_mode)) {}
ExtraICState GetExtraICState() const { return bit_field_; }
@@ -38,7 +36,6 @@ class CallICState final BASE_EMBEDDED {
void (*Generate)(Isolate*,
const CallICState&));
- int argc() const { return ArgcBits::decode(bit_field_); }
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(bit_field_);
}
@@ -47,8 +44,7 @@ class CallICState final BASE_EMBEDDED {
}
private:
- typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
- typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
+ typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
int const bit_field_;
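
With ArgcBits gone, CallICState's extra IC state shrinks to three bits. A sketch of the resulting layout, using placeholder enum values:

#include <cstdint>

enum ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
enum TailCallMode { kDisallow, kAllow };

// ConvertModeBits occupies bits 0..1, TailCallModeBits bit 2.
uint32_t EncodeCallICState(ConvertReceiverMode convert, TailCallMode tail) {
  return static_cast<uint32_t>(convert) | (static_cast<uint32_t>(tail) << 2);
}

ConvertReceiverMode DecodeConvertMode(uint32_t bits) {
  return static_cast<ConvertReceiverMode>(bits & 3);
}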
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 0e751bd358..7e0cefdca9 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -4,6 +4,8 @@
#include "src/ic/ic.h"
+#include <iostream>
+
#include "src/accessors.h"
#include "src/api-arguments-inl.h"
#include "src/api.h"
@@ -16,6 +18,7 @@
#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/ic-inl.h"
#include "src/ic/stub-cache.h"
@@ -98,38 +101,51 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
- if (FLAG_trace_ic) {
- PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
-
- // TODO(jkummerow): Add support for "apply". The logic is roughly:
- // marker = [fp_ + kMarkerOffset];
- // if marker is smi and marker.value == INTERNAL and
- // the frame's code == builtin(Builtins::kFunctionApply):
- // then print "apply from" and advance one frame
-
- Object* maybe_function =
- Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
- if (maybe_function->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(maybe_function);
- JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
- stdout, true);
+ if (!FLAG_trace_ic) return;
+ PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+
+ // TODO(jkummerow): Add support for "apply". The logic is roughly:
+ // marker = [fp_ + kMarkerOffset];
+ // if marker is smi and marker.value == INTERNAL and
+ // the frame's code == builtin(Builtins::kFunctionApply):
+ // then print "apply from" and advance one frame
+
+ Object* maybe_function =
+ Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ if (maybe_function->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(maybe_function);
+ int code_offset = 0;
+ if (function->IsInterpreted()) {
+ code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+ } else {
+ code_offset =
+ static_cast<int>(pc() - function->code()->instruction_start());
}
+ JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
+ code_offset, stdout, true);
+ }
- const char* modifier = "";
- if (kind() == Code::KEYED_STORE_IC) {
- KeyedAccessStoreMode mode =
- casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
- modifier = GetTransitionMarkModifier(mode);
- }
- void* map = nullptr;
- if (!receiver_map().is_null()) {
- map = reinterpret_cast<void*>(*receiver_map());
- }
- PrintF(" (%c->%c%s) map=%p ", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier, map);
- name->ShortPrint(stdout);
- PrintF("]\n");
+ const char* modifier = "";
+ if (kind() == Code::KEYED_STORE_IC) {
+ KeyedAccessStoreMode mode =
+ casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ modifier = GetTransitionMarkModifier(mode);
}
+ Map* map = nullptr;
+ if (!receiver_map().is_null()) {
+ map = *receiver_map();
+ }
+ PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier,
+ reinterpret_cast<void*>(map));
+ if (map != nullptr) {
+ PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
+ map->NumberOfOwnDescriptors());
+ std::cout << map->instance_type();
+ }
+ PrintF(") ");
+ name->ShortPrint(stdout);
+ PrintF("]\n");
}
@@ -171,6 +187,16 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
StackFrame* frame = it.frame();
DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
#endif
+ // For interpreted functions, some bytecode handlers construct a
+ // frame. We have to skip the constructed frame to find the interpreted
+ // function's frame. Check whether there is an additional frame, and if
+ // there is, skip it; the pc, however, should not be updated, since the
+ // calls to ICs happen from bytecode handlers.
+ Object* frame_type =
+ Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+ fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
+ }
fp_ = fp;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_address_ = constant_pool;
@@ -224,11 +250,6 @@ SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// corresponding to the frame.
StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
- if (FLAG_ignition && it.frame()->type() == StackFrame::STUB) {
- // Advance over bytecode handler frame.
- // TODO(rmcilroy): Remove this once bytecode handlers don't need a frame.
- it.Advance();
- }
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
@@ -504,19 +525,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
-
-// static
-Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state) {
- // TODO(ishell): remove extra_ic_state
- if (FLAG_compiled_keyed_generic_loads) {
- return KeyedLoadGenericStub(isolate).GetCode();
- } else {
- return isolate->builtins()->KeyedLoadIC_Megamorphic();
- }
-}
-
-
static bool MigrateDeprecated(Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -562,11 +570,11 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
nexus->ConfigureMonomorphic(name, map, handler);
} else if (kind() == Code::STORE_IC) {
StoreICNexus* nexus = casted_nexus<StoreICNexus>();
- nexus->ConfigureMonomorphic(map, Handle<Code>::cast(handler));
+ nexus->ConfigureMonomorphic(map, handler);
} else {
DCHECK(kind() == Code::KEYED_STORE_IC);
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
- nexus->ConfigureMonomorphic(name, map, Handle<Code>::cast(handler));
+ nexus->ConfigureMonomorphic(name, map, handler);
}
vector_set_ = true;
@@ -691,11 +699,8 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
return true;
}
-bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
- DCHECK(code->IsSmi() || code->IsCode());
- if (!code->IsSmi() && !Code::cast(*code)->is_handler()) {
- return false;
- }
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
+ DCHECK(IsHandler(*handler));
if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
Handle<Map> map = receiver_map();
MapHandleList maps;
@@ -735,16 +740,16 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
number_of_valid_maps++;
if (number_of_valid_maps > 1 && is_keyed()) return false;
if (number_of_valid_maps == 1) {
- ConfigureVectorState(name, receiver_map(), code);
+ ConfigureVectorState(name, receiver_map(), handler);
} else {
if (handler_to_overwrite >= 0) {
- handlers.Set(handler_to_overwrite, code);
+ handlers.Set(handler_to_overwrite, handler);
if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
maps.Set(handler_to_overwrite, map);
}
} else {
maps.Add(map);
- handlers.Add(code);
+ handlers.Add(handler);
}
ConfigureVectorState(name, &maps, &handlers);
@@ -754,8 +759,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
}
void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
- DCHECK(handler->IsSmi() ||
- (handler->IsCode() && Handle<Code>::cast(handler)->is_handler()));
+ DCHECK(IsHandler(*handler));
ConfigureVectorState(name, receiver_map(), handler);
}
@@ -786,24 +790,28 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
return transitioned_map == target_map;
}
-void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
- DCHECK(code->IsCode() || (code->IsSmi() && (kind() == Code::LOAD_IC ||
- kind() == Code::KEYED_LOAD_IC)));
+void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
+ DCHECK(IsHandler(*handler));
+ // Currently only LoadIC and KeyedLoadIC support non-code handlers.
+ DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
+ kind() == Code::KEYED_LOAD_IC ||
+ kind() == Code::STORE_IC ||
+ kind() == Code::KEYED_STORE_IC);
switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
- UpdateMonomorphicIC(code, name);
+ UpdateMonomorphicIC(handler, name);
break;
case RECOMPUTE_HANDLER:
case MONOMORPHIC:
if (kind() == Code::LOAD_GLOBAL_IC) {
- UpdateMonomorphicIC(code, name);
+ UpdateMonomorphicIC(handler, name);
break;
}
// Fall through.
case POLYMORPHIC:
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
- if (UpdatePolymorphicIC(name, code)) break;
+ if (UpdatePolymorphicIC(name, handler)) break;
// For keyed stubs, we can't know whether old handlers were for the
// same key.
CopyICToMegamorphicCache(name);
@@ -812,7 +820,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
ConfigureVectorState(MEGAMORPHIC, name);
// Fall through.
case MEGAMORPHIC:
- UpdateMegamorphicCache(*receiver_map(), *name, *code);
+ UpdateMegamorphicCache(*receiver_map(), *name, *handler);
// Indicate that we've handled this case.
DCHECK(UseVector());
vector_set_ = true;
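
PatchCache above is the heart of the IC state machine. A compressed sketch of the transitions it implements (keyed-IC restrictions and the LOAD_GLOBAL_IC special case are omitted):

enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };

State NextState(State s, bool polymorphic_update_succeeded) {
  switch (s) {
    case UNINITIALIZED:
    case PREMONOMORPHIC:
      return MONOMORPHIC;                  // install the first handler
    case MONOMORPHIC:
    case POLYMORPHIC:
      return polymorphic_update_succeeded  // add/overwrite a (map, handler) pair
                 ? POLYMORPHIC
                 : MEGAMORPHIC;            // overflow into the stub cache
    case MEGAMORPHIC:
      return MEGAMORPHIC;                  // stays megamorphic; cache updated
  }
  return MEGAMORPHIC;
}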
@@ -825,6 +833,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state) {
+ DCHECK(!FLAG_tf_store_ic_stub);
LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
return is_strict(mode)
? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
@@ -833,13 +842,186 @@ Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
if (FLAG_tf_load_ic_stub) {
- return handle(Smi::FromInt(index.GetLoadByFieldOffset()), isolate());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
+ return LoadHandler::LoadField(isolate(), index);
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
LoadFieldStub stub(isolate(), index);
return stub.GetCode();
}
+namespace {
+
+template <bool fill_array = true>
+int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name,
+ Handle<FixedArray> array, int first_index) {
+ DCHECK(holder.is_null() || holder->HasFastProperties());
+
+ // We don't encode the requirement to check access rights because we already
+ // passed the access check for the current native context and the access
+ // can't be revoked.
+
+ HandleScope scope(isolate);
+ int checks_count = 0;
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ // The validity cell check for primitive and global proxy receivers does
+ // not guarantee that a certain native context ever had access to another
+ // native context. However, a handler created for one native context could
+ // be used in another native context through the megamorphic stub cache.
+ // So we record the original native context to which this handler
+ // corresponds.
+ if (fill_array) {
+ Handle<Context> native_context = isolate->native_context();
+ array->set(LoadHandler::kFirstPrototypeIndex + checks_count,
+ native_context->self_weak_cell());
+ }
+ checks_count++;
+
+ } else if (receiver_map->IsJSGlobalObjectMap()) {
+ if (fill_array) {
+ Handle<JSGlobalObject> global = isolate->global_object();
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(LoadHandler::kFirstPrototypeIndex + checks_count, *weak_cell);
+ }
+ checks_count++;
+ }
+
+ // Create/count entries for each global or dictionary prototype that appears
+ // in the prototype chain from the receiver up to the holder.
+ PrototypeIterator::WhereToEnd end = name->IsPrivate()
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
+ for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
+ iter.Advance()) {
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+ if (holder.is_identical_to(current)) break;
+ Handle<Map> current_map(current->map(), isolate);
+
+ if (current_map->IsJSGlobalObjectMap()) {
+ if (fill_array) {
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+
+ } else if (current_map->is_dictionary_map()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (fill_array) {
+ DCHECK_EQ(NameDictionary::kNotFound,
+ current->property_dictionary()->FindEntry(name));
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+ }
+ }
+ return checks_count;
+}
+
+// Returns 0 if the validity cell check is enough to ensure that the
+// prototype chain from |receiver_map| to |holder| did not change.
+// If |holder| is an empty handle, then the full prototype chain is checked.
+// Otherwise returns the number of prototype checks required, or -1 if the
+// handler has to be compiled.
+int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name) {
+ return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
+ Handle<FixedArray>(), 0);
+}
+
+} // namespace
+
+Handle<Object> LoadIC::LoadFromPrototype(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> smi_handler) {
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler =
+ LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ smi_handler =
+ LoadHandler::EnableNegativeLookupOnReceiver(isolate(), smi_handler);
+ }
+
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ DCHECK(!validity_cell.is_null());
+
+ Handle<WeakCell> holder_cell =
+ Map::GetOrCreatePrototypeWeakCell(holder, isolate());
+
+ if (checks_count == 0) {
+ return isolate()->factory()->NewTuple3(holder_cell, smi_handler,
+ validity_cell);
+ }
+ Handle<FixedArray> handler_array(isolate()->factory()->NewFixedArray(
+ LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(LoadHandler::kHolderCellIndex, *holder_cell);
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ LoadHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
+
+Handle<Object> LoadIC::LoadNonExistent(Handle<Map> receiver_map,
+ Handle<Name> name) {
+ Handle<JSObject> holder; // null handle
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+
+ bool do_negative_lookup_on_receiver =
+ receiver_map->is_dictionary_map() && !receiver_map->IsJSGlobalObjectMap();
+ Handle<Object> smi_handler =
+ LoadHandler::LoadNonExistent(isolate(), do_negative_lookup_on_receiver);
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler =
+ LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+ }
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::FromInt(0), isolate());
+ }
+
+ Factory* factory = isolate()->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(factory->null_value(), smi_handler,
+ validity_cell);
+ }
+ Handle<FixedArray> handler_array(factory->NewFixedArray(
+ LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(LoadHandler::kHolderCellIndex, *factory->null_value());
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ LoadHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
DCHECK(lookup->state() == LookupIterator::ACCESSOR);
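
LoadFromPrototype and LoadNonExistent above build one of two data-driven handler shapes, chosen by checks_count. A structural sketch with stand-in types; the field names mirror the constants defined in handler-configuration.h:

#include <vector>

struct WeakCell;  // holder map / prototype, weakly held
struct Cell;      // prototype-chain validity cell

struct Tuple3Handler {    // checks_count == 0
  WeakCell* holder_cell;  // kHolderCellOffset
  int smi_handler;        // kSmiHandlerOffset (bit-packed config)
  Cell* validity_cell;    // kValidityCellOffset
};

struct ArrayHandler {     // checks_count > 0
  int smi_handler;                         // kSmiHandlerIndex == 0
  Cell* validity_cell;                     // kValidityCellIndex == 1
  WeakCell* holder_cell;                   // kHolderCellIndex == 2
  std::vector<WeakCell*> prototype_cells;  // kFirstPrototypeIndex onward
};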
@@ -884,6 +1066,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
TRACE_IC("LoadIC", lookup->name());
return;
@@ -894,7 +1077,10 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
+ if (kind() == Code::LOAD_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+ code = LoadNonExistent(receiver_map(), lookup->name());
+ } else if (kind() == Code::LOAD_GLOBAL_IC) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -964,30 +1150,80 @@ StubCache* IC::stub_cache() {
return nullptr;
}
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* code) {
- if (code->IsSmi()) {
- // TODO(jkummerow): Support Smis in the code cache.
- Handle<Map> map_handle(map, isolate());
- Handle<Name> name_handle(name, isolate());
- FieldIndex index =
- FieldIndex::ForLoadByFieldOffset(map, Smi::cast(code)->value());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
- LoadFieldStub stub(isolate(), index);
- Code* handler = *stub.GetCode();
- stub_cache()->Set(*name_handle, *map_handle, handler);
- return;
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
+ stub_cache()->Set(name, map, handler);
+}
+
+void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
+ if (!FLAG_runtime_call_stats) return;
+
+ if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
+ kind() == Code::KEYED_LOAD_IC) {
+ switch (lookup->state()) {
+ case LookupIterator::ACCESS_CHECK:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
+ break;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Exotic);
+ break;
+ case LookupIterator::INTERCEPTOR:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Interceptor);
+ break;
+ case LookupIterator::JSPROXY:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_JSProxy);
+ break;
+ case LookupIterator::NOT_FOUND:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_NonExistent);
+ break;
+ case LookupIterator::ACCESSOR:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
+ break;
+ case LookupIterator::DATA:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Data);
+ break;
+ case LookupIterator::TRANSITION:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
+ break;
+ }
+ } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+ switch (lookup->state()) {
+ case LookupIterator::ACCESS_CHECK:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
+ break;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Exotic);
+ break;
+ case LookupIterator::INTERCEPTOR:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Interceptor);
+ break;
+ case LookupIterator::JSPROXY:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_JSProxy);
+ break;
+ case LookupIterator::NOT_FOUND:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_NonExistent);
+ break;
+ case LookupIterator::ACCESSOR:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Accessor);
+ break;
+ case LookupIterator::DATA:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Data);
+ break;
+ case LookupIterator::TRANSITION:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Transition);
+ break;
+ }
+ } else {
+ TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
}
- DCHECK(code->IsCode());
- stub_cache()->Set(name, map, Code::cast(code));
}
Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
Handle<Object> value) {
// Try to find a globally shared handler stub.
- Handle<Object> handler_or_index = GetMapIndependentHandler(lookup);
- if (!handler_or_index.is_null()) {
- DCHECK(handler_or_index->IsCode() || handler_or_index->IsSmi());
- return handler_or_index;
+ Handle<Object> shared_handler = GetMapIndependentHandler(lookup);
+ if (!shared_handler.is_null()) {
+ DCHECK(IC::IsHandler(*shared_handler));
+ return shared_handler;
}
// Otherwise check the map's handler cache for a map-specific handler, and
@@ -1007,16 +1243,16 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
stub_holder_map = receiver_map();
}
- Handle<Code> code = PropertyHandlerCompiler::Find(
+ Handle<Object> handler = PropertyHandlerCompiler::Find(
lookup->name(), stub_holder_map, kind(), flag);
// Use the cached value if it exists, and if it is different from the
// handler that just missed.
- if (!code.is_null()) {
- Handle<Object> handler;
- if (maybe_handler_.ToHandle(&handler)) {
- if (!handler.is_identical_to(code)) {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ if (!handler.is_null()) {
+ Handle<Object> current_handler;
+ if (maybe_handler_.ToHandle(&current_handler)) {
+ if (!current_handler.is_identical_to(handler)) {
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
} else {
// maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
@@ -1024,24 +1260,27 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
// cache (which just missed) is different from the cached handler.
if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
- Code* megamorphic_cached_code = stub_cache()->Get(*lookup->name(), map);
- if (megamorphic_cached_code != *code) {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ Object* megamorphic_cached_handler =
+ stub_cache()->Get(*lookup->name(), map);
+ if (megamorphic_cached_handler != *handler) {
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
} else {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
}
}
- code = CompileHandler(lookup, value, flag);
- DCHECK(code->is_handler());
- DCHECK(Code::ExtractCacheHolderFromFlags(code->flags()) == flag);
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
-
- return code;
+ handler = CompileHandler(lookup, value, flag);
+ DCHECK(IC::IsHandler(*handler));
+ if (handler->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(handler);
+ DCHECK_EQ(Code::ExtractCacheHolderFromFlags(code->flags()), flag);
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+ }
+ return handler;
}
Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
@@ -1111,17 +1350,33 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// Ruled out by IsCompatibleReceiver() above.
DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
- if (!holder->HasFastProperties()) return slow_stub();
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
- int index = lookup->GetAccessorIndex();
- LoadApiGetterStub stub(isolate(), true, index);
- return stub.GetCode();
- }
- if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ if (!holder->HasFastProperties() ||
+ (info->is_sloppy() && !receiver->IsJSReceiver())) {
+ DCHECK(!holder->HasFastProperties() || !receiver_is_holder);
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
+ if (FLAG_tf_load_ic_stub) {
+ Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
+ isolate(), lookup->GetAccessorIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+ return smi_handler;
+ }
+ if (kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(),
+ LoadIC_LoadApiGetterFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(),
+ smi_handler);
+ }
+ } else {
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
+ int index = lookup->GetAccessorIndex();
+ LoadApiGetterStub stub(isolate(), true, index);
+ return stub.GetCode();
+ }
+ }
break; // Custom-compiled handler.
}
}
@@ -1153,18 +1408,36 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
FieldIndex field = lookup->GetFieldIndex();
+ Handle<Object> smi_handler = SimpleFieldLoad(field);
if (receiver_is_holder) {
- return SimpleFieldLoad(field);
+ return smi_handler;
+ }
+ if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
break; // Custom-compiled handler.
}
// -------------- Constant properties --------------
DCHECK(lookup->property_details().type() == DATA_CONSTANT);
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
- LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
- return stub.GetCode();
+ if (FLAG_tf_load_ic_stub) {
+ Handle<Object> smi_handler =
+ LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+ return smi_handler;
+ }
+ if (kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ }
+ } else {
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
+ LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+ return stub.GetCode();
+ }
}
break; // Custom-compiled handler.
}
@@ -1182,9 +1455,9 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) {
+Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> unused,
+ CacheHolderFlag cache_holder) {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
#ifdef DEBUG
// Only used by DCHECKs below.
@@ -1229,6 +1502,10 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
DCHECK(IsCompatibleReceiver(lookup, map));
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorPair()) {
+ if (lookup->TryLookupCachedProperty()) {
+ DCHECK_EQ(LookupIterator::DATA, lookup->state());
+ return ComputeHandler(lookup);
+ }
DCHECK(holder->HasFastProperties());
DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
@@ -1421,7 +1698,9 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if ((object->IsJSObject() && key->IsSmi()) ||
(object->IsString() && key->IsNumber())) {
UpdateLoadElement(Handle<HeapObject>::cast(object));
- TRACE_IC("LoadIC", key);
+ if (is_vector_set()) {
+ TRACE_IC("LoadIC", key);
+ }
}
}
@@ -1580,6 +1859,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
+ TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
TRACE_IC("StoreIC", lookup->name());
return;
@@ -1589,13 +1869,72 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (!use_ic) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
}
- Handle<Code> code =
- use_ic ? Handle<Code>::cast(ComputeHandler(lookup, value)) : slow_stub();
+ Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
+ : Handle<Object>::cast(slow_stub());
- PatchCache(lookup->name(), code);
+ PatchCache(lookup->name(), handler);
TRACE_IC("StoreIC", lookup->name());
}
+Handle<Object> StoreIC::StoreTransition(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Map> transition,
+ Handle<Name> name) {
+ int descriptor = transition->LastAdded();
+ Handle<DescriptorArray> descriptors(transition->instance_descriptors());
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ // Declarative handlers don't support access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ Handle<Object> smi_handler;
+ if (details.type() == DATA_CONSTANT) {
+ smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
+
+ } else {
+ DCHECK_EQ(DATA, details.type());
+ bool extend_storage =
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
+
+ FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
+ smi_handler = StoreHandler::TransitionToField(
+ isolate(), descriptor, index, representation, extend_storage);
+ }
+ // |holder| is either the receiver (if the property is non-existent) or
+ // one of its prototypes.
+ DCHECK(!holder.is_null());
+ bool is_nonexistent = holder->map() == transition->GetBackPointer();
+ if (is_nonexistent) holder = Handle<JSObject>::null();
+
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+ DCHECK(!receiver_map->IsJSGlobalObjectMap());
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::FromInt(0), isolate());
+ }
+
+ Handle<WeakCell> transition_cell = Map::WeakCellForMap(transition);
+
+ Factory* factory = isolate()->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(transition_cell, smi_handler, validity_cell);
+ }
+ Handle<FixedArray> handler_array(factory->NewFixedArray(
+ StoreHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(StoreHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(StoreHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(StoreHandler::kTransitionCellIndex, *transition_cell);
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ StoreHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
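For orientation, here is the shape of the data handler StoreTransition builds above, modelled with plain structs. The field order mirrors the code (NewTuple3 takes the transition cell, the Smi handler, and the validity cell; the array form is laid out by the StoreHandler::k*Index constants), but the types are simplified stand-ins, not V8's:

#include <vector>

// Simplified stand-ins for V8 heap types (illustrative only).
struct Smi {};       // Smi-encoded store handler word
struct WeakCell {};  // weak reference to the transition map or a prototype
struct Cell {};      // prototype-chain validity cell (or Smi 0 if unneeded)

// checks_count == 0: a fixed three-field tuple is enough.
struct Tuple3Handler {
  WeakCell* transition_cell;
  Smi* smi_handler;
  Cell* validity_cell;
};

// checks_count > 0: a flat array laid out by the StoreHandler::k*Index
// constants, with one weak cell appended per prototype to check.
struct ArrayHandler {
  Smi* smi_handler;
  Cell* validity_cell;
  WeakCell* transition_cell;
  std::vector<WeakCell*> prototype_cells;  // kFirstPrototypeIndex onward
};

The Tuple3 fast path avoids allocating a FixedArray for the common case where no prototype checks are needed.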
static Handle<Code> PropertyCellStoreHandler(
Isolate* isolate, Handle<JSObject> receiver, Handle<JSGlobalObject> holder,
@@ -1632,8 +1971,13 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
-
DCHECK(lookup->IsCacheableTransition());
+ if (FLAG_tf_store_ic_stub) {
+ Handle<Map> transition = lookup->transition_map();
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+ return StoreTransition(receiver_map(), holder, transition,
+ lookup->name());
+ }
break; // Custom-compiled handler.
}
@@ -1711,17 +2055,25 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- Handle<FieldType> field_type = lookup->GetFieldType();
- use_stub = !field_type->IsClass();
- }
- if (use_stub) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- return stub.GetCode();
+ if (FLAG_tf_store_ic_stub) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+ int descriptor = lookup->GetFieldDescriptorIndex();
+ FieldIndex index = lookup->GetFieldIndex();
+ return StoreHandler::StoreField(isolate(), descriptor, index,
+ lookup->representation());
+ } else {
+ bool use_stub = true;
+ if (lookup->representation().IsHeapObject()) {
+ // Only use a generic stub if no types need to be tracked.
+ Handle<FieldType> field_type = lookup->GetFieldType();
+ use_stub = !field_type->IsClass();
+ }
+ if (use_stub) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ return stub.GetCode();
+ }
}
break; // Custom-compiled handler.
}
@@ -1742,9 +2094,9 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
+Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
// This is currently guaranteed by checks in StoreIC::Store.
@@ -1765,6 +2117,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
cell->set_value(isolate()->heap()->the_hole_value());
return code;
}
+ DCHECK(!FLAG_tf_store_ic_stub);
Handle<Map> transition = lookup->transition_map();
// Currently not handled by CompileStoreTransition.
DCHECK(holder->HasFastProperties());
@@ -1836,6 +2189,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
+ DCHECK(!FLAG_tf_store_ic_stub);
#ifdef DEBUG
bool use_stub = true;
if (lookup->representation().IsHeapObject()) {
@@ -1981,7 +2335,6 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
}
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_Polymorphic);
MapHandleList transitioned_maps(target_receiver_maps.length());
CodeHandleList handlers(target_receiver_maps.length());
PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
@@ -2241,7 +2594,6 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2258,7 +2610,6 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2279,7 +2630,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
} else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
Handle<Name> key(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*key, *isolate->factory()->empty_string());
+ DCHECK_NE(*key, isolate->heap()->empty_string());
DCHECK_EQ(*isolate->global_object(), *receiver);
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2298,7 +2649,6 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2309,7 +2659,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
vector->GetKind(vector_slot));
Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, *isolate->factory()->empty_string());
+ DCHECK_NE(*name, isolate->heap()->empty_string());
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2330,7 +2680,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
vector->GetKind(vector_slot));
Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, *isolate->factory()->empty_string());
+ DCHECK_NE(*name, isolate->heap()->empty_string());
Handle<JSGlobalObject> global = isolate->global_object();
@@ -2343,7 +2693,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
script_contexts, lookup_result.context_index);
Handle<Object> result =
FixedArray::get(*script_context, lookup_result.slot_index, isolate);
- if (*result == *isolate->factory()->the_hole_value()) {
+ if (*result == isolate->heap()->the_hole_value()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
@@ -2370,7 +2720,6 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
// Used from ic-<arch>.cc
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2387,7 +2736,6 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
typedef LoadWithVectorDescriptor Descriptor;
DCHECK_EQ(Descriptor::kParameterCount, args.length());
@@ -2406,7 +2754,6 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2434,7 +2781,6 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2470,7 +2816,6 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
// Runtime functions don't follow the IC's calling convention.
Handle<Object> object = args.at<Object>(0);
@@ -2609,7 +2954,6 @@ MaybeHandle<Object> BinaryOpIC::Transition(
RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
typedef BinaryOpDescriptor Descriptor;
@@ -2622,7 +2966,6 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
@@ -2686,7 +3029,6 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2711,7 +3053,6 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
DCHECK(args.length() == 1);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
@@ -2729,7 +3070,7 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
language_mode));
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index bf395f1f2a..9e69cc85d0 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -81,6 +81,8 @@ class IC {
static InlineCacheState StateFromCode(Code* code);
+ static inline bool IsHandler(Object* object);
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -138,6 +140,8 @@ class IC {
static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
+ void TraceHandlerCacheHitStats(LookupIterator* lookup);
+
// Compute the handler either by compiling or by retrieving a cached version.
Handle<Object> ComputeHandler(LookupIterator* lookup,
Handle<Object> value = Handle<Code>::null());
@@ -145,11 +149,11 @@ class IC {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
+ virtual Handle<Object> CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
UNREACHABLE();
- return Handle<Code>::null();
+ return Handle<Object>::null();
}
void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
@@ -303,12 +307,23 @@ class LoadIC : public IC {
Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
- Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
- CacheHolderFlag cache_holder) override;
+ Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
+ CacheHolderFlag cache_holder) override;
private:
+ // Creates a data handler that represents a load of a field by the given
+ // index.
Handle<Object> SimpleFieldLoad(FieldIndex index);
+ // Creates a data handler that represents a prototype chain check followed
+ // by a given Smi-handler that encodes a load from the holder.
+ // Can be used only if GetPrototypeCheckCount() returns a non-negative value.
+ Handle<Object> LoadFromPrototype(Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name,
+ Handle<Object> smi_handler);
+
+ // Creates a data handler that represents a load of a non-existent property.
+ Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
+
friend class IC;
};
@@ -341,10 +356,6 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
-
- static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
@@ -402,10 +413,14 @@ class StoreIC : public IC {
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
- Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
- CacheHolderFlag cache_holder) override;
+ Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> value,
+ CacheHolderFlag cache_holder) override;
private:
+ Handle<Object> StoreTransition(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Map> transition, Handle<Name> name);
+
friend class IC;
};
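The through-line of these header changes is that handlers stop being Code-only: CompileHandler and its callers now traffic in Handle<Object>, since a handler may be a Smi-encoded load/store, a tuple, or a fixed array as well as compiled code, which is presumably the set the new IC::IsHandler predicate accepts. A rough standalone sketch of such a predicate (hypothetical, with simplified types in place of V8's tagged-pointer checks):

struct Object { virtual ~Object() = default; };
struct Smi : Object {};         // data handler encoded in a tagged integer
struct Tuple3 : Object {};      // transition cell + smi handler + validity cell
struct FixedArray : Object {};  // same, plus prototype-check weak cells
struct Code : Object {};        // classic compiled handler

// Illustrative only: the kind of check IC::IsHandler performs once data
// handlers exist alongside code handlers.
bool IsHandler(const Object* o) {
  return dynamic_cast<const Smi*>(o) != nullptr ||
         dynamic_cast<const Tuple3*>(o) != nullptr ||
         dynamic_cast<const FixedArray*>(o) != nullptr ||
         dynamic_cast<const Code*>(o) != nullptr;
}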
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
new file mode 100644
index 0000000000..30faba85e9
--- /dev/null
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -0,0 +1,549 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/keyed-store-generic.h"
+
+#include "src/compiler/code-assembler.h"
+#include "src/contexts.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class KeyedStoreGenericAssembler : public CodeStubAssembler {
+ public:
+ void KeyedStoreGeneric(const StoreICParameters* p,
+ LanguageMode language_mode);
+
+ private:
+ enum UpdateLength {
+ kDontChangeLength,
+ kIncrementLengthByOne,
+ kBumpLengthWithGap
+ };
+
+ void EmitGenericElementStore(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* intptr_index,
+ Node* value, Node* context, Label* slow);
+
+ void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
+ const StoreICParameters* p, Label* slow);
+
+ void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
+ Label* non_fast_elements,
+ Label* only_fast_elements);
+
+ void TryRewriteElements(Node* receiver, Node* receiver_map, Node* elements,
+ Node* native_context, ElementsKind from_kind,
+ ElementsKind to_kind, Label* bailout);
+
+ void StoreElementWithCapacity(Node* receiver, Node* receiver_map,
+ Node* elements, Node* elements_kind,
+ Node* intptr_index, Node* value, Node* context,
+ Label* slow, UpdateLength update_length);
+
+ void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value,
+ UpdateLength update_length);
+
+ void TryChangeToHoleyMapHelper(Node* receiver, Node* receiver_map,
+ Node* native_context, ElementsKind packed_kind,
+ ElementsKind holey_kind, Label* done,
+ Label* map_mismatch, Label* bailout);
+ void TryChangeToHoleyMap(Node* receiver, Node* receiver_map,
+ Node* current_elements_kind, Node* context,
+ ElementsKind packed_kind, Label* bailout);
+ void TryChangeToHoleyMapMulti(Node* receiver, Node* receiver_map,
+ Node* current_elements_kind, Node* context,
+ ElementsKind packed_kind,
+ ElementsKind packed_kind_2, Label* bailout);
+
+ // Do not add fields, so that this is safe to reinterpret_cast to CSA.
+};
+
+void KeyedStoreGenericGenerator::Generate(
+ CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
+ LanguageMode language_mode) {
+ STATIC_ASSERT(sizeof(CodeStubAssembler) ==
+ sizeof(KeyedStoreGenericAssembler));
+ auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
+ assm->KeyedStoreGeneric(p, language_mode);
+}
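The STATIC_ASSERT guards the reinterpret_cast: because KeyedStoreGenericAssembler adds behaviour but no data members, a CodeStubAssembler* can be reinterpreted as the subclass. A minimal standalone illustration of the same pattern (generic names, nothing V8-specific; strictly this cast is outside the letter of the C++ standard, which is why the size check is kept as a tripwire):

#include <cstdio>

class Assembler {
 public:
  void Emit(const char* op) { std::printf("emit %s\n", op); }
};

// Adds methods only; deliberately no fields, so the layouts match.
class StoreAssembler : public Assembler {
 public:
  void EmitStore() {
    Emit("check");
    Emit("store");
  }
};

void Generate(Assembler* assembler) {
  static_assert(sizeof(Assembler) == sizeof(StoreAssembler),
                "StoreAssembler must not add fields");
  auto* assm = reinterpret_cast<StoreAssembler*>(assembler);
  assm->EmitStore();
}

int main() {
  Assembler a;
  Generate(&a);
  return 0;
}

This lets the helper class keep its private methods out of the public CodeStubAssembler interface without introducing a new allocation or virtual dispatch.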
+
+void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
+ Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
+ Variable var_map(this, MachineRepresentation::kTagged);
+ var_map.Bind(receiver_map);
+ Label loop_body(this, &var_map);
+ Goto(&loop_body);
+
+ Bind(&loop_body);
+ {
+ Node* map = var_map.value();
+ Node* prototype = LoadMapPrototype(map);
+ GotoIf(WordEqual(prototype, NullConstant()), only_fast_elements);
+ Node* prototype_map = LoadMap(prototype);
+ var_map.Bind(prototype_map);
+ Node* instance_type = LoadMapInstanceType(prototype_map);
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ non_fast_elements);
+ Node* elements_kind = LoadMapElementsKind(prototype_map);
+ STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &loop_body);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
+ Goto(non_fast_elements);
+ }
+}
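In straight-line terms, the loop above walks the prototype chain from the receiver's map and bails out as soon as any prototype could intercept element stores; only a chain that ends in null with exclusively fast (or empty) elements passes. A rough standalone model (simplified types, not V8's):

enum class ElementsKind { kFastSmi, kFast, kFastDouble, kNoElements, kSlow };

struct Map {
  const Map* prototype_map;       // nullptr once the chain reaches null
  bool custom_elements_receiver;  // proxies, string wrappers, etc.
  ElementsKind elements_kind;
};

// Mirrors BranchIfPrototypesHaveNonFastElements: true corresponds to the
// only_fast_elements label, false to non_fast_elements.
bool PrototypesHaveOnlyFastElements(const Map* receiver_map) {
  for (const Map* m = receiver_map->prototype_map; m != nullptr;
       m = m->prototype_map) {
    if (m->custom_elements_receiver) return false;
    if (m->elements_kind != ElementsKind::kFastSmi &&
        m->elements_kind != ElementsKind::kFast &&
        m->elements_kind != ElementsKind::kFastDouble &&
        m->elements_kind != ElementsKind::kNoElements) {
      return false;
    }
  }
  return true;
}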
+
+void KeyedStoreGenericAssembler::TryRewriteElements(
+ Node* receiver, Node* receiver_map, Node* elements, Node* native_context,
+ ElementsKind from_kind, ElementsKind to_kind, Label* bailout) {
+ DCHECK(IsFastPackedElementsKind(from_kind));
+ ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
+ ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ TrapAllocationMemento(receiver, bailout);
+ }
+ Label perform_transition(this), check_holey_map(this);
+ Variable var_target_map(this, MachineType::PointerRepresentation());
+ // Check if the receiver has the default |from_kind| map.
+ {
+ Node* packed_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(from_kind));
+ GotoIf(WordNotEqual(receiver_map, packed_map), &check_holey_map);
+ var_target_map.Bind(
+ LoadContextElement(native_context, Context::ArrayMapIndex(to_kind)));
+ Goto(&perform_transition);
+ }
+
+ // Check if the receiver has the default |holey_from_kind| map.
+ Bind(&check_holey_map);
+ {
+ Node* holey_map = LoadContextElement(
+ native_context, Context::ArrayMapIndex(holey_from_kind));
+ GotoIf(WordNotEqual(receiver_map, holey_map), bailout);
+ var_target_map.Bind(LoadContextElement(
+ native_context, Context::ArrayMapIndex(holey_to_kind)));
+ Goto(&perform_transition);
+ }
+
+ // Found a supported transition target map, perform the transition!
+ Bind(&perform_transition);
+ {
+ if (IsFastDoubleElementsKind(from_kind) !=
+ IsFastDoubleElementsKind(to_kind)) {
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
+ capacity, INTPTR_PARAMETERS, bailout);
+ }
+ StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+ }
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
+ Node* receiver, Node* receiver_map, Node* native_context,
+ ElementsKind packed_kind, ElementsKind holey_kind, Label* done,
+ Label* map_mismatch, Label* bailout) {
+ Node* packed_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(packed_kind));
+ GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
+ if (AllocationSite::GetMode(packed_kind, holey_kind) ==
+ TRACK_ALLOCATION_SITE) {
+ TrapAllocationMemento(receiver, bailout);
+ }
+ Node* holey_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
+ StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+ Goto(done);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMap(
+ Node* receiver, Node* receiver_map, Node* current_elements_kind,
+ Node* context, ElementsKind packed_kind, Label* bailout) {
+ ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+ Label already_holey(this);
+
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+ &already_holey);
+ Node* native_context = LoadNativeContext(context);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+ holey_kind, &already_holey, bailout, bailout);
+ Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
+ Node* receiver, Node* receiver_map, Node* current_elements_kind,
+ Node* context, ElementsKind packed_kind, ElementsKind packed_kind_2,
+ Label* bailout) {
+ ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+ ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2);
+ Label already_holey(this), check_other_kind(this);
+
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+ &already_holey);
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)),
+ &already_holey);
+
+ Node* native_context = LoadNativeContext(context);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+ holey_kind, &already_holey, &check_other_kind,
+ bailout);
+ Bind(&check_other_kind);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context,
+ packed_kind_2, holey_kind_2, &already_holey,
+ bailout, bailout);
+ Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
+ Node* receiver, Node* index, Node* value, UpdateLength update_length) {
+ if (update_length != kDontChangeLength) {
+ Node* new_length = SmiTag(IntPtrAdd(index, IntPtrConstant(1)));
+ StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length,
+ MachineRepresentation::kTagged);
+ }
+ Return(value);
+}
+
+void KeyedStoreGenericAssembler::StoreElementWithCapacity(
+ Node* receiver, Node* receiver_map, Node* elements, Node* elements_kind,
+ Node* intptr_index, Node* value, Node* context, Label* slow,
+ UpdateLength update_length) {
+ if (update_length != kDontChangeLength) {
+ CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
+ Int32Constant(JS_ARRAY_TYPE)));
+ }
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
+
+ Label check_double_elements(this), check_cow_elements(this);
+ Node* elements_map = LoadMap(elements);
+ GotoIf(WordNotEqual(elements_map, LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &check_double_elements);
+
+ // FixedArray backing store -> Smi or object elements.
+ {
+ Node* offset = ElementOffsetFromIndex(intptr_index, FAST_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
+ // Check if we're about to overwrite the hole. We can safely do that
+ // only if there can be no setters on the prototype chain.
+ // If we know that we're storing beyond the previous array length, we
+ // can skip the hole check (and always assume the hole).
+ {
+ Label hole_check_passed(this);
+ if (update_length == kDontChangeLength) {
+ Node* element = Load(MachineType::AnyTagged(), elements, offset);
+ GotoIf(WordNotEqual(element, TheHoleConstant()), &hole_check_passed);
+ }
+ BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+ &hole_check_passed);
+ Bind(&hole_check_passed);
+ }
+
+ // Check if the value we're storing matches the elements_kind. Smis
+ // can always be stored.
+ {
+ Label non_smi_value(this);
+ GotoUnless(TaggedIsSmi(value), &non_smi_value);
+ // If we're about to introduce holes, ensure holey elements.
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
+ FAST_SMI_ELEMENTS, FAST_ELEMENTS, slow);
+ }
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+ value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&non_smi_value);
+ }
+
+ // Check if we already have object elements; just do the store if so.
+ {
+ Label must_transition(this);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ &must_transition);
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+ FAST_ELEMENTS, slow);
+ }
+ Store(MachineRepresentation::kTagged, elements, offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&must_transition);
+ }
+
+ // Transition to the required ElementsKind.
+ {
+ Label transition_to_double(this), transition_to_object(this);
+ Node* native_context = LoadNativeContext(context);
+ Branch(WordEqual(LoadMap(value), LoadRoot(Heap::kHeapNumberMapRootIndex)),
+ &transition_to_double, &transition_to_object);
+ Bind(&transition_to_double);
+ {
+ // If we're adding holes at the end, always transition to a holey
+ // elements kind, otherwise try to remain packed.
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_SMI_ELEMENTS, target_kind, slow);
+ // Reload migrated elements.
+ Node* double_elements = LoadElements(receiver);
+ Node* double_offset = ElementOffsetFromIndex(
+ intptr_index, FAST_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ // Make sure we do not store signalling NaNs into double arrays.
+ Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
+ double_offset, double_value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+ update_length);
+ }
+
+ Bind(&transition_to_object);
+ {
+ // If we're adding holes at the end, always transition to a holey
+ // elements kind, otherwise try to remain packed.
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_SMI_ELEMENTS, target_kind, slow);
+ // The elements backing store didn't change, no reload necessary.
+ CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
+ Store(MachineRepresentation::kTagged, elements, offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+ update_length);
+ }
+ }
+ }
+
+ Bind(&check_double_elements);
+ Node* fixed_double_array_map = LoadRoot(Heap::kFixedDoubleArrayMapRootIndex);
+ GotoIf(WordNotEqual(elements_map, fixed_double_array_map),
+ &check_cow_elements);
+ // FixedDoubleArray backing store -> double elements.
+ {
+ Node* offset = ElementOffsetFromIndex(intptr_index, FAST_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
+ // Check if we're about to overwrite the hole. We can safely do that
+ // only if there can be no setters on the prototype chain.
+ {
+ Label hole_check_passed(this);
+ // If we know that we're storing beyond the previous array length, we
+ // can skip the hole check (and always assume the hole).
+ if (update_length == kDontChangeLength) {
+ Label found_hole(this);
+ LoadDoubleWithHoleCheck(elements, offset, &found_hole,
+ MachineType::None());
+ Goto(&hole_check_passed);
+ Bind(&found_hole);
+ }
+ BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+ &hole_check_passed);
+ Bind(&hole_check_passed);
+ }
+
+ // Try to store the value as a double.
+ {
+ Label non_number_value(this);
+ Node* double_value = PrepareValueForWrite(value, Representation::Double(),
+ &non_number_value);
+ // Make sure we do not store signalling NaNs into double arrays.
+ double_value = Float64SilenceNaN(double_value);
+ // If we're about to introduce holes, ensure holey elements.
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+ FAST_DOUBLE_ELEMENTS, slow);
+ }
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
+ double_value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&non_number_value);
+ }
+
+ // Transition to object elements.
+ {
+ Node* native_context = LoadNativeContext(context);
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_DOUBLE_ELEMENTS, target_kind, slow);
+ // Reload migrated elements.
+ Node* fast_elements = LoadElements(receiver);
+ Node* fast_offset = ElementOffsetFromIndex(
+ intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+ }
+ }
+
+ Bind(&check_cow_elements);
+ {
+ // TODO(jkummerow): Use GrowElementsCapacity instead of bailing out.
+ Goto(slow);
+ }
+}
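Stripped of the assembler plumbing, the store path above is a small decision tree on the incoming value versus the current backing store: Smis fit everywhere, a heap number pushes Smi elements to double elements, and any other heap object forces tagged (object) elements, with holey variants chosen when the store grows past the length. A sketch under those assumptions (hypothetical helper, not the patch's code):

enum class Kind { kSmiElements, kDoubleElements, kObjectElements };

struct Value {
  bool is_smi;
  bool is_heap_number;
};

// Illustrative only: which elements kind the receiver ends up with after
// storing |v| into a backing store of kind |current|.
Kind KindAfterStore(Kind current, const Value& v) {
  if (v.is_smi) return current;  // a Smi can be stored in any backing store
  if (v.is_heap_number) {
    // Numbers keep double elements; Smi elements transition to double.
    return current == Kind::kSmiElements ? Kind::kDoubleElements : current;
  }
  // Any other heap object forces tagged (object) elements.
  return Kind::kObjectElements;
}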
+
+void KeyedStoreGenericAssembler::EmitGenericElementStore(
+ Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
+ Node* value, Node* context, Label* slow) {
+ Label if_in_bounds(this), if_increment_length_by_one(this),
+ if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
+ if_typed_array(this), if_dictionary(this);
+ Node* elements = LoadElements(receiver);
+ Node* elements_kind = LoadMapElementsKind(receiver_map);
+ GotoIf(
+ Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_nonfast);
+
+ Label if_array(this);
+ GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
+ {
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
+ }
+ Bind(&if_array);
+ {
+ Node* length = SmiUntag(LoadJSArrayLength(receiver));
+ GotoIf(UintPtrLessThan(intptr_index, length), &if_in_bounds);
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GotoIf(UintPtrGreaterThanOrEqual(intptr_index, capacity), &if_grow);
+ Branch(WordEqual(intptr_index, length), &if_increment_length_by_one,
+ &if_bump_length_with_gap);
+ }
+
+ Bind(&if_in_bounds);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kDontChangeLength);
+ }
+
+ Bind(&if_increment_length_by_one);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kIncrementLengthByOne);
+ }
+
+ Bind(&if_bump_length_with_gap);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kBumpLengthWithGap);
+ }
+
+ // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
+ // an ElementsKind transition might be necessary.
+ Bind(&if_grow);
+ {
+ Comment("Grow backing store");
+ // TODO(jkummerow): Support inline backing store growth.
+ Goto(slow);
+ }
+
+ // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further dispatch.
+ Bind(&if_nonfast);
+ {
+ STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(Int32GreaterThanOrEqual(
+ elements_kind,
+ Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_typed_array);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ &if_dictionary);
+ Goto(slow);
+ }
+
+ Bind(&if_dictionary);
+ {
+ Comment("Dictionary");
+ // TODO(jkummerow): Support storing to dictionary elements.
+ Goto(slow);
+ }
+
+ Bind(&if_typed_array);
+ {
+ Comment("Typed array");
+ // TODO(jkummerow): Support typed arrays.
+ Goto(slow);
+ }
+}
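The dispatch above classifies the index against the JSArray length and the backing-store capacity; everything past capacity (and every non-fast elements kind) falls back to the runtime for now. As a plain function over those two bounds (hypothetical helper mirroring the labels in the code):

#include <cstddef>

enum class StorePath {
  kInBounds,              // index < length: store, length untouched
  kIncrementLengthByOne,  // index == length: append, new length index + 1
  kBumpLengthWithGap,     // length < index < capacity: holes appear
  kGrow                   // index >= capacity: grow (bails to runtime here)
};

StorePath ClassifyArrayStore(size_t index, size_t length, size_t capacity) {
  if (index < length) return StorePath::kInBounds;
  if (index >= capacity) return StorePath::kGrow;
  if (index == length) return StorePath::kIncrementLengthByOne;
  return StorePath::kBumpLengthWithGap;
}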
+
+void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
+ Node* receiver, Node* receiver_map, const StoreICParameters* p,
+ Label* slow) {
+ Comment("stub cache probe");
+ // TODO(jkummerow): Don't rely on the stub cache as much.
+ // - existing properties can be overwritten inline (unless readonly).
+ // - for dictionary mode receivers, we can even add properties inline
+ // (unless the prototype chain prevents it).
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ {
+ Comment("KeyedStoreGeneric found handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), slow);
+ }
+ Bind(&stub_cache_miss);
+ {
+ Comment("KeyedStoreGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
+ LanguageMode language_mode) {
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Label if_index(this), if_unique_name(this), slow(this);
+
+ Node* receiver = p->receiver;
+ GotoIf(TaggedIsSmi(receiver), &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &slow);
+
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+
+ Bind(&if_index);
+ {
+ Comment("integer index");
+ EmitGenericElementStore(receiver, receiver_map, instance_type,
+ var_index.value(), p->value, p->context, &slow);
+ }
+
+ Bind(&if_unique_name);
+ {
+ Comment("key is unique name");
+ EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+ }
+
+ Bind(&slow);
+ {
+ Comment("KeyedStoreGeneric_slow");
+ TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
+ p->value, SmiConstant(language_mode));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
new file mode 100644
index 0000000000..daeb61fe68
--- /dev/null
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class KeyedStoreGenericGenerator {
+ public:
+ static void Generate(CodeStubAssembler* assembler,
+ const CodeStubAssembler::StoreICParameters* p,
+ LanguageMode language_mode);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SRC_IC_KEYED_STORE_GENERIC_H_
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index 2aa0283485..1c97ca3cad 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, t0};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, a3, a0, t0};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, t0};
- return registers;
-}
+ Register store_registers[] = {receiver, name, a3, t0};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index df7a0df175..b2ddea5dac 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -393,10 +393,30 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ lw(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+ if (!compare_native_contexts_only) {
+ __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+ // Compare security tokens of current and expected native contexts.
+ __ lw(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ lw(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ }
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+ __ bind(&done);
+}
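The generated check boils down to: pass if the current and expected native contexts are identical; otherwise, unless only identity comparison was requested, pass if their security tokens match. As ordinary C++ over a simplified context type (illustrative only):

struct NativeContext {
  const void* security_token;
};

// Mirrors GenerateAccessCheck: |expected| is the weakly-held context from
// the handler; a cleared weak cell would already have jumped to miss.
bool AccessCheckPasses(const NativeContext* current,
                       const NativeContext* expected,
                       bool compare_native_contexts_only) {
  if (current == expected) return true;
  if (compare_native_contexts_only) return false;
  return current->security_token == expected->security_token;
}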
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -415,17 +435,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -435,46 +444,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -482,7 +473,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -493,7 +484,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index ce9e3d9403..561c9d331b 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -129,141 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&in_bounds, lo, key, Operand(scratch1));
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- // Negative keys can't take the fast OOB path.
- __ Branch(slow, lt, key, Operand(zero_reg));
- __ bind(&check_prototypes);
- __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&absent, eq, scratch2, Operand(at));
- __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(slow, ne, elements, Operand(at));
- __ Branch(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ Addu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
- __ lw(scratch2, MemOperand(at));
-
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ Branch(&check_prototypes, eq, scratch2, Operand(at));
- __ Move(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ GetObjectType(key, map, hash);
- __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
- // Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ Branch(index_string, eq, at, Operand(zero_reg));
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ And(at, hash, Operand(kIsNotInternalizedMask));
- __ Branch(not_unique, ne, at, Operand(zero_reg));
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -345,105 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in ra.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(a2));
- DCHECK(receiver.is(a1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(a0, a3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
- a3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // a3: elements map
- // t0: elements
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow, ne, a3, Operand(at));
- __ sra(a0, key, kSmiTagSize);
- __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
- __ Ret();
-
- // Slow case, key and receiver still in a2 and a1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
- a3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasNamedInterceptor, &slow);
-
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, t0, Operand(at));
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, t0, t1,
- t2, t5);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // a3: elements
- __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
- // Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- t0, a3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(a3, key);
- // Now jump to the place where smi keys are handled.
- __ Branch(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index bf6c73e86f..16d7a3d790 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, a4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, a3, a0, a4};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a4};
- return registers;
-}
+ Register store_registers[] = {receiver, name, a3, a4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 2190f6d63e..249f8fedb3 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -393,10 +393,30 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ ld(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+ if (!compare_native_contexts_only) {
+ __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+ // Compare security tokens of current and expected native contexts.
+ __ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ }
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -415,17 +435,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -435,46 +444,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -482,7 +473,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -493,7 +484,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
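Note: GenerateAccessCheck above (added for each architecture in this patch)
replaces the old per-iteration CheckAccessGlobalProxy call. Restated as plain
C++ for readability (a sketch, not the generated code), where security_token()
stands for the slot at Context::SECURITY_TOKEN_INDEX and a cleared
native-context weak cell branches to the miss label before this logic runs:

// Returns true when access is allowed; false corresponds to the emitted
// branch to the |miss| label.
static bool AccessCheckPasses(Context* current_native_context,
                              Context* expected_native_context,
                              bool compare_native_contexts_only) {
  if (current_native_context == expected_native_context) return true;
  if (compare_native_contexts_only) return false;
  // Distinct native contexts may still grant access to each other when
  // they share a security token.
  return current_native_context->security_token() ==
         expected_native_context->security_token();
}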
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index c2f3cb6024..57efa350c8 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -128,142 +118,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
-  //             Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&in_bounds, lo, key, Operand(scratch1));
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- // Negative keys can't take the fast OOB path.
- __ Branch(slow, lt, key, Operand(zero_reg));
- __ bind(&check_prototypes);
- __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&absent, eq, scratch2, Operand(at));
- __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(slow, ne, elements, Operand(at));
- __ Branch(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ Daddu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ SmiScale(at, key, kPointerSizeLog2);
- __ daddu(at, at, scratch1);
- __ ld(scratch2, MemOperand(at));
-
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ Branch(&check_prototypes, eq, scratch2, Operand(at));
- __ Move(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ GetObjectType(key, map, hash);
- __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
- // Is the string an array index, with cached numeric value?
- __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ Branch(index_string, eq, at, Operand(zero_reg));
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ And(at, hash, Operand(kIsNotInternalizedMask));
- __ Branch(not_unique, ne, at, Operand(zero_reg));
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -344,105 +198,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in ra.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(a2));
- DCHECK(receiver.is(a1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(a0, a3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
- a3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
- // a3: elements map
- // a4: elements
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow, ne, a3, Operand(at));
- __ dsra32(a0, key, 0);
- __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
- __ Ret();
-
- // Slow case, key and receiver still in a2 and a1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
- a3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasNamedInterceptor, &slow);
-
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, a4, Operand(at));
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, a4, a5,
- a6, t1);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // a3: elements
- __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
- // Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- a4, a3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(a3, key);
- // Now jump to the place where smi keys are handled.
- __ Branch(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
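Note: the hand-written megamorphic keyed-load stub and its helpers are deleted
here and, below, for ppc, s390, and x64 as well. For reference, the fast path
that the removed GenerateFastArrayLoad emitted is sketched in plain C++;
LoadSlow() is a placeholder for the bail-out to the runtime, not a real V8
function, and the accessor names are best-effort assumptions:

static Object* LoadSlow();  // Placeholder: bail out to the generic path.

// In-bounds fast loads, plus an out-of-bounds (or hole) walk up the
// prototype chain that may fold the load to 'undefined'.
static Object* FastArrayLoadSketch(Isolate* isolate, JSObject* receiver,
                                   int key) {
  FixedArray* elements = FixedArray::cast(receiver->elements());
  if (key < 0) return LoadSlow();  // Negative keys never take the OOB path.
  if (key < elements->length()) {
    Object* value = elements->get(key);
    // The hole means a prototype may still supply the element.
    if (!value->IsTheHole(isolate)) return value;
  }
  for (Object* prototype = receiver->map()->prototype();;) {
    if (prototype->IsNull(isolate)) {
      return isolate->heap()->undefined_value();
    }
    Map* map = HeapObject::cast(prototype)->map();
    // Any prototype that is not a plain JSObject, needs access checks, has
    // an indexed interceptor, or has non-empty elements forces the slow
    // path, since it could observably intercept the load.
    if (map->instance_type() < JS_OBJECT_TYPE ||
        map->is_access_check_needed() || map->has_indexed_interceptor() ||
        JSObject::cast(prototype)->elements() !=
            isolate->heap()->empty_fixed_array()) {
      return LoadSlow();
    }
    prototype = map->prototype();
  }
}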
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index 6143b4ce47..f78ef57e74 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r3, r7};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r6, r3, r7};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r7};
- return registers;
-}
+ Register store_registers[] = {receiver, name, r6, r7};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index aafdc77c9b..e0caaa6a1f 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -402,10 +402,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ LoadP(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ beq(&done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ LoadP(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ bne(miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -424,17 +448,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -443,18 +456,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
@@ -466,23 +470,20 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -490,7 +491,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -501,7 +502,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 6dd788146b..359a6a42dd 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ beq(global_object);
- __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ beq(global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -131,143 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
- __ andi(r0, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ bne(slow, cr0);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
- __ blt(slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
-  //             Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
-  //  elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmpl(key, scratch1);
- __ blt(&in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmpi(key, Operand::Zero());
- __ blt(slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ beq(&absent);
- __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ blt(slow);
- __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ bne(slow, cr0);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ bne(slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ addi(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- __ SmiToPtrArrayOffset(scratch2, key);
- __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ beq(&check_prototypes);
- __ mr(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ bgt(not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ beq(&unique);
-
- // Is the string an array index, with cached numeric value?
- __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
- __ and_(r0, hash, r8, SetRC);
- __ beq(index_string, cr0);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ andi(r0, hash, Operand(kIsNotInternalizedMask));
- __ bne(not_unique, cr0);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r3;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -349,107 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r5));
- DCHECK(receiver.is(r4));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r3, r6, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
- r6);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
- // r6: elements map
- // r7: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r6, ip);
- __ bne(&slow);
- __ SmiUntag(r3, key);
- __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
- __ Ret();
-
- // Slow case, key and receiver still in r3 and r4.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
- r6);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r7, ip);
- __ beq(&probe_dictionary);
-
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r7, r8,
- r9, r10);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r6: elements
- __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
- // Load the property to r3.
- GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r7, r6);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r6, key);
- // Now jump to the place where smi keys are handled.
- __ b(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/s390/access-compiler-s390.cc b/deps/v8/src/ic/s390/access-compiler-s390.cc
index 0a3285d5aa..ed8c089b9c 100644
--- a/deps/v8/src/ic/s390/access-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/access-compiler-s390.cc
@@ -18,20 +18,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r2, r6};
- return registers;
-}
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r5, r2, r6};
+
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r6};
- return registers;
+ Register store_registers[] = {receiver, name, r5, r6};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 504bacebaf..72658ec1d1 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -383,9 +383,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ LoadP(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ CmpP(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ beq(&done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ LoadP(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ CmpP(scratch1, scratch2);
+ }
+ __ bne(miss);
+
+ __ bind(&done);
+}
+
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -404,17 +429,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -423,46 +437,29 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -470,7 +467,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -481,7 +478,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 08eb3e4ff1..bd83af1f59 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -18,16 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ beq(global_object);
- __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ beq(global_object);
-}
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -127,141 +117,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
- __ mov(r0,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ AndP(r0, scratch);
- __ bne(slow /*, cr0*/);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
- __ blt(slow);
-}
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
-  //             Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
-  //  elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ CmpLogicalP(key, scratch1);
- __ blt(&in_bounds, Label::kNear);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ CmpP(key, Operand::Zero());
- __ blt(slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ beq(&absent, Label::kNear);
- __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ blt(slow);
- __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ bne(slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ bne(slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ AddP(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- __ SmiToPtrArrayOffset(scratch2, key);
- __ LoadP(scratch2, MemOperand(scratch2, scratch1));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ beq(&check_prototypes);
- __ LoadRR(result, scratch2);
- __ bind(&done);
-}
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ bgt(not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ beq(&unique, Label::kNear);
-
- // Is the string an array index, with cached numeric value?
- __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
- __ AndP(r0, hash, r7);
- __ beq(index_string);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ tmll(hash, Operand(kIsNotInternalizedMask));
- __ bne(not_unique);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r2;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -339,103 +194,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r4));
- DCHECK(receiver.is(r3));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r2, r5, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
- r5);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
- // r5: elements map
- // r6: elements
- __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
- __ bne(&slow, Label::kNear);
- __ SmiUntag(r2, key);
- __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
- __ Ret();
-
- // Slow case, key and receiver still in r2 and r3.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
- r5);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
- __ beq(&probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r6, r7,
- r8, r9);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r5: elements
- __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r2.
- GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r6, r5);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r5, key);
- // Now jump to the place where smi keys are handled.
- __ b(&index_smi);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index fe1adaaadb..84dbf48436 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -6,13 +6,18 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
+#include "src/ic/ic-inl.h"
#include "src/type-info.h"
namespace v8 {
namespace internal {
StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
- : isolate_(isolate), ic_kind_(ic_kind) {}
+ : isolate_(isolate), ic_kind_(ic_kind) {
+ // Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
+ // when the entry is not found is not considered as a handler.
+ DCHECK(!IC::IsHandler(nullptr));
+}
void StubCache::Initialize() {
DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
@@ -24,18 +29,23 @@ void StubCache::Initialize() {
namespace {
bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
- Code* code) {
- // Validate that the name does not move on scavenge, and that we
+ Object* handler) {
+ // Validate that the name and handler do not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
DCHECK(!name->GetHeap()->InNewSpace(name));
+ DCHECK(!name->GetHeap()->InNewSpace(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
- if (code) {
- Code::Flags expected_flags = Code::RemoveHolderFromFlags(
- Code::ComputeHandlerFlags(stub_cache->ic_kind()));
- Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
- DCHECK_EQ(expected_flags, flags);
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ if (handler) {
+ DCHECK(IC::IsHandler(handler));
+ if (handler->IsCode()) {
+ Code* code = Code::cast(handler);
+ Code::Flags expected_flags = Code::RemoveHolderFromFlags(
+ Code::ComputeHandlerFlags(stub_cache->ic_kind()));
+ Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
+ DCHECK_EQ(expected_flags, flags);
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ }
}
return true;
}
@@ -43,17 +53,17 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
} // namespace
#endif
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
- DCHECK(CommonStubCacheChecks(this, name, map, code));
+Object* StubCache::Set(Name* name, Map* map, Object* handler) {
+ DCHECK(CommonStubCacheChecks(this, name, map, handler));
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- Code* old_code = primary->value;
+ Object* old_handler = primary->value;
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
- if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+ if (old_handler != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Map* old_map = primary->map;
int seed = PrimaryOffset(primary->key, old_map);
int secondary_offset = SecondaryOffset(primary->key, seed);
@@ -63,13 +73,13 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
// Update primary cache.
primary->key = name;
- primary->value = code;
+ primary->value = handler;
primary->map = map;
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return code;
+ return handler;
}
-Code* StubCache::Get(Name* name, Map* map) {
+Object* StubCache::Get(Name* name, Map* map) {
DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
@@ -81,7 +91,7 @@ Code* StubCache::Get(Name* name, Map* map) {
if (secondary->key == name && secondary->map == map) {
return secondary->value;
}
- return NULL;
+ return nullptr;
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index ebcff448ad..bdd7f4a3be 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -35,14 +35,14 @@ class StubCache {
public:
struct Entry {
Name* key;
- Code* value;
+ Object* value;
Map* map;
};
void Initialize();
// Access cache for entry hash(name, map).
- Code* Set(Name* name, Map* map, Code* code);
- Code* Get(Name* name, Map* map);
+ Object* Set(Name* name, Map* map, Object* handler);
+ Object* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
// Collect all maps that match the name.
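Note: the stub cache now maps (Name, Map) keys to arbitrary Object* handlers
(smi-encoded handlers as well as Code objects), which is why the constructor
asserts that the nullptr returned by Get() on a miss can never itself be a
valid handler. A consumer-side probe, sketched below; CompileHandlerSomehow()
is a hypothetical stand-in for whatever IC machinery produces a handler:

// Probe the two-level cache; memoize a freshly produced handler on a miss.
Object* ProbeStubCache(StubCache* cache, Name* name, Map* map) {
  Object* handler = cache->Get(name, map);  // primary, then secondary table
  if (handler == nullptr) {
    handler = CompileHandlerSomehow(name, map);  // hypothetical helper
    cache->Set(name, map, handler);  // may retire a displaced primary entry
  }
  return handler;
}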
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 2b292528c8..9e95b9506c 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -11,30 +11,27 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rax, rbx, rdi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, rax, rbx, rdi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rbx, rdi};
- return registers;
-}
+ Register store_registers[] = {receiver, name, rbx, rdi};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index f386fc5b65..36acccc007 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -401,10 +401,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ movp(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmpp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ movp(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ movp(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmpp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -424,17 +446,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg. On the first
// iteration, reg is an alias for object_reg, on later iterations,
// it is an alias for holder_reg.
@@ -446,46 +457,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -493,7 +486,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -504,7 +497,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
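Note: both LoadWeakValue call sites above used to allocate a fresh WeakCell on
every handler compilation (the removed TODO(jkummerow)).
Map::GetOrCreatePrototypeWeakCell caches one cell per prototype instead; a
sketch of the pattern follows, assuming the cell lives on the prototype map's
PrototypeInfo (the field accessors below are assumptions, not verified API):

// One shared WeakCell per prototype instead of one per compiled handler.
Handle<WeakCell> GetOrCreatePrototypeWeakCellSketch(
    Handle<JSObject> prototype, Isolate* isolate) {
  Handle<PrototypeInfo> proto_info =
      Map::GetOrCreatePrototypeInfo(prototype, isolate);
  Object* maybe_cell = proto_info->weak_cell();  // assumed accessor
  if (maybe_cell->IsWeakCell()) {
    // An earlier handler compilation already created the cell: share it.
    return Handle<WeakCell>(WeakCell::cast(maybe_cell), isolate);
  }
  Handle<WeakCell> cell = isolate->factory()->NewWeakCell(prototype);
  proto_info->set_weak_cell(*cell);  // assumed accessor
  return cell;
}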
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index d0445a229a..a916e22fa5 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so the code at miss_label
// must always perform a backup property load that is complete.
@@ -133,237 +121,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch, Register result,
- Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(below, &in_bounds);
-
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ SmiCompare(key, Smi::FromInt(0));
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- // scratch: current prototype
- __ CompareRoot(scratch, Heap::kNullValueRootIndex);
- __ j(equal, &absent);
- __ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
- __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch: map of current prototype
- __ CmpInstanceType(scratch, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at the jump target.
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(rax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, key);
- __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements object is a number dictionary.
- // rbx: key as untagged int32
- // rax: elements
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- Register megamorphic_scratch = rdi;
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC on any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, key, megamorphic_scratch, no_reg);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rbx: elements
-
- __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, key);
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
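The block deleted from ic-x64.cc above is the hand-written megamorphic keyed-load stub (an equivalent deletion from ic-x87.cc follows below). For readers who do not want to trace the MacroAssembler code, here is a small self-contained C++ model of the protocol GenerateFastArrayLoad implemented; the types and the int value model are illustrative only, not V8 API.

#include <optional>
#include <stdexcept>
#include <vector>

// One link of the prototype chain: its elements (nullopt models the_hole)
// and one flag standing in for every bailout condition the stub tested
// (access checks, indexed interceptors, non-empty backing stores, ...).
struct ProtoLevel {
  std::vector<std::optional<int>> elements;
  bool needs_slow_path = false;
};

// chain[0] is the receiver, chain[1..] its prototypes up to null.
// Throwing models the jump to the slow (runtime) label.
std::optional<int> FastArrayLoad(const std::vector<ProtoLevel>& chain,
                                 int key) {
  // No fast out-of-bounds path for negative keys.
  if (key < 0) throw std::runtime_error("slow");
  const auto& own = chain[0].elements;
  if (key < static_cast<int>(own.size()) && own[key].has_value()) {
    return own[key];  // In bounds and not the_hole: the plain fast load.
  }
  // Out of bounds or a hole: undefined may be returned only if no prototype
  // could possibly shadow the index or observe the access.
  for (size_t i = 1; i < chain.size(); ++i) {
    if (chain[i].needs_slow_path || !chain[i].elements.empty()) {
      throw std::runtime_error("slow");
    }
  }
  return std::nullopt;  // Models returning undefined.
}

The subtlety the model preserves is that loading the_hole cannot simply yield undefined: a prototype may define the index, so the stub walked the chain and bailed out at the first object with interceptors, access checks, or non-empty elements.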
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index e528de65ba..d1867553cd 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -16,22 +16,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, ebx, eax, edi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi};
- return registers;
+ Register store_registers[] = {receiver, name, ebx, edi};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
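Every per-architecture access-compiler file in this commit gets the same treatment as the x87 one above: the two *_calling_convention() functions that returned pointers to function-local static arrays are folded into one InitializePlatformSpecific that copies the registers into an AccessCompilerData cache. A plausible shape for that class is sketched below; treat it as an approximation of the new access-compiler-data.h rather than a quote from it.

// Sketch of the cache InitializePlatformSpecific fills in. Owning the
// copies through unique_ptr replaces the old function-local statics,
// which were awkward to share safely.
class AccessCompilerData {
 public:
  AccessCompilerData() {}

  bool IsInitialized() const { return load_calling_convention_ != nullptr; }
  void Initialize(int load_register_count, const Register* load_registers,
                  int store_register_count, const Register* store_registers) {
    load_calling_convention_.reset(NewArray<Register>(load_register_count));
    for (int i = 0; i < load_register_count; ++i) {
      load_calling_convention_[i] = load_registers[i];
    }
    store_calling_convention_.reset(NewArray<Register>(store_register_count));
    for (int i = 0; i < store_register_count; ++i) {
      store_calling_convention_[i] = store_registers[i];
    }
  }

  Register* load_calling_convention() {
    return load_calling_convention_.get();
  }
  Register* store_calling_convention() {
    return store_calling_convention_.get();
  }

 private:
  std::unique_ptr<Register[]> load_calling_convention_;
  std::unique_ptr<Register[]> store_calling_convention_;

  DISALLOW_COPY_AND_ASSIGN(AccessCompilerData);
};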
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 5eca3dc0cb..a5c32d37cc 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -411,10 +411,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ mov(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -433,17 +455,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -453,46 +464,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -500,7 +493,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -511,7 +504,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
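The GenerateAccessCheck added above replaces the per-iteration CheckAccessGlobalProxy call that CheckPrototypes used to emit: the handler now pins the expected native context in a weak cell and compares it against the current one, falling back to a security-token comparison unless compare_native_contexts_only is set. In plain C++ the decision the emitted code encodes is roughly the following (a hedged restatement, not actual V8 API usage).

// Access is allowed if we are in the expected native context, or, when an
// exact match is not required, if both contexts share a security token
// (for example, same-origin frames).
bool AccessAllowed(Context* current_native_context,
                   Context* expected_native_context,
                   bool compare_native_contexts_only) {
  if (current_native_context == expected_native_context) return true;
  if (compare_native_contexts_only) return false;
  return current_native_context->security_token() ==
         expected_native_context->security_token();
}

A cleared native-context weak cell makes LoadWeakValue jump straight to the miss label, so a handler can never grant access on behalf of a context that has died.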
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index baf435e0f2..f96e509f53 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register scratch,
- Register scratch2, Register result,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // scratch2 - holds maps and prototypes during prototype chain check.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch);
-
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(below, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, 0);
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &absent);
- __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
- __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
- // scratch: elements of current prototype
- // scratch2: map of current prototype
- __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, key);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements object is a number dictionary.
- // ebx: untagged index
- // eax: elements
- __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(receiver);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(receiver);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(receiver);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC on any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(isolate);
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
- edi);
-
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
-
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {