author     Trevor Norris <trev.norris@gmail.com>  2014-09-09 14:03:08 -0700
committer  Trevor Norris <trev.norris@gmail.com>  2014-09-09 16:52:10 -0700
commit     f9ce97084cbaf90d0669c7cd829da34232b75bdb (patch)
tree       e9705d703811c1fd6a0133a66ad11deec148b631 /deps
parent     bf5e2f246eff55dfc33318f0ffb4572a56f7645a (diff)
v8: Upgrade 3.26.33 with 14 patches
V8 3.26.31 has received 14 patches since the upgrade to 3.26.33. Since 3.26.33
is technically a tag on the 3.27 branch, reverting back to 3.26.31 would remove
now default functionality like WeakMaps. Because of that the patches have
simply been cherry-picked and squashed.

Here is a summary of all patches:

* Fix index register assignment in LoadFieldByIndex for arm, arm64, and mips.
* Fix invalid attributes when generalizing because of incompatible map change.
* Skip write barriers when updating the weak hash table.
* MIPS: Avoid HeapObject check in HStoreNamedField.
* Do GC if CodeRange fails to allocate a block.
* Array.concat: properly go to dictionary mode when required.
* Keep CodeRange::current_allocation_block_index_ in range.
* Grow heap slower if GC freed many global handles.
* Do not eliminate bounds checks for "<const> - x".
* Add missing map check to optimized f.apply(...).
* In GrowMode, force the value to the right representation to avoid deopts
  between storing the length and storing the value.
* Reduce max executable size limit.
* Fix invalid condition in check elimination effects.
* Fix off-by-one error in Array.concat slow mode check.

For more information see: https://github.com/v8/v8/commits/3.26

Reviewed-By: Fedor Indutny <fedor@indutny.com>
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/build/features.gypi                           |  2
-rw-r--r--  deps/v8/include/v8.h                                  |  6
-rw-r--r--  deps/v8/src/accessors.cc                              | 24
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                        | 10
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc                | 25
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc                |  5
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc                    | 10
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc            | 24
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc            |  5
-rw-r--r--  deps/v8/src/global-handles.cc                         | 18
-rw-r--r--  deps/v8/src/global-handles.h                          |  6
-rw-r--r--  deps/v8/src/heap.cc                                   | 67
-rw-r--r--  deps/v8/src/heap.h                                    | 27
-rw-r--r--  deps/v8/src/hydrogen-bce.cc                           |  5
-rw-r--r--  deps/v8/src/hydrogen-check-elimination.cc             |  2
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc                  |  2
-rw-r--r--  deps/v8/src/hydrogen-instructions.h                   | 47
-rw-r--r--  deps/v8/src/hydrogen.cc                               | 24
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc              | 30
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc                      | 11
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc              |  5
-rw-r--r--  deps/v8/src/mark-compact.cc                           |  1
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc              | 25
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc                      | 10
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc              |  5
-rw-r--r--  deps/v8/src/objects.cc                                | 61
-rw-r--r--  deps/v8/src/objects.h                                 |  5
-rw-r--r--  deps/v8/src/runtime.cc                                | 12
-rw-r--r--  deps/v8/src/spaces.cc                                 | 18
-rw-r--r--  deps/v8/src/spaces.h                                  |  4
-rw-r--r--  deps/v8/src/stub-cache.cc                             | 13
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc                | 29
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                        | 10
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc                |  5
-rw-r--r--  deps/v8/test/cctest/test-api.cc                       | 17
-rw-r--r--  deps/v8/test/cctest/test-heap.cc                      | 37
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-368243.js        | 25
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-385054.js        | 16
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-386034.js        | 19
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-382143.js  | 16
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-387031.js  | 15
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-grow-deopt.js    | 16
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-gvn-ftt.js       | 27
-rw-r--r--  deps/v8/tools/gen-postmortem-metadata.py              | 45
44 files changed, 430 insertions(+), 356 deletions(-)
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index d542d05bb0..e8f5b2f08f 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -111,7 +111,7 @@
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 538b6581f1..d39dca96bb 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -2493,7 +2493,7 @@ class PropertyCallbackInfo {
public:
V8_INLINE Isolate* GetIsolate() const;
V8_INLINE Local<Value> Data() const;
- V8_INLINE Local<Object> This() const;
+ V8_INLINE Local<Value> This() const;
V8_INLINE Local<Object> Holder() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
@@ -6488,8 +6488,8 @@ Local<Value> PropertyCallbackInfo<T>::Data() const {
template<typename T>
-Local<Object> PropertyCallbackInfo<T>::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
+Local<Value> PropertyCallbackInfo<T>::This() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[kThisIndex]));
}
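The hunk above is the one embedder-visible API change in this upgrade:
PropertyCallbackInfo<T>::This() now returns a Local<Value> instead of a
Local<Object>, since property callbacks can fire with a receiver that is not a
JS object (for example, reading a property off a primitive). A minimal sketch
of how an embedder accessor might adapt, assuming a 3.26-era getter signature
and purely illustrative names:

    #include <v8.h>

    // Hypothetical embedder accessor; illustrative only.
    static void MyGetter(v8::Local<v8::String> property,
                         const v8::PropertyCallbackInfo<v8::Value>& info) {
      // info.This() is now a Local<Value>, not a Local<Object>.
      v8::Local<v8::Value> receiver = info.This();
      if (!receiver->IsObject()) {
        // The receiver can be a primitive; handle or bail out explicitly.
        info.GetReturnValue().SetUndefined();
        return;
      }
      v8::Local<v8::Object> self = receiver.As<v8::Object>();
      info.GetReturnValue().Set(self->Get(property));
    }

Code that previously relied on the implicit Local<Object> now needs the
explicit IsObject() check and cast shown here.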
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index f219bed3b3..8c8fcdd999 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -20,16 +20,6 @@ namespace v8 {
namespace internal {
-// We have a slight impedance mismatch between the external API and the way we
-// use callbacks internally: Externally, callbacks can only be used with
-// v8::Object, but internally we even have callbacks on entities which are
-// higher in the hierarchy, so we can only return i::Object here, not
-// i::JSObject.
-Handle<Object> GetThisFrom(const v8::PropertyCallbackInfo<v8::Value>& info) {
- return Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
-}
-
-
Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate,
Handle<String> name,
@@ -156,7 +146,7 @@ void Accessors::ArrayLengthGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *GetThisFrom(info);
+ Object* object = *Utils::OpenHandle(*info.This());
// Traverse the prototype chain until we reach an array.
JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
Object* result;
@@ -239,7 +229,7 @@ void Accessors::StringLengthGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* value = *GetThisFrom(info);
+ Object* value = *Utils::OpenHandle(*info.This());
Object* result;
if (value->IsJSValue()) value = JSValue::cast(value)->value();
if (value->IsString()) {
@@ -834,7 +824,7 @@ void Accessors::FunctionPrototypeGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = GetThisFrom(info);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
Handle<Object> result = GetFunctionPrototype(isolate, object);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -874,7 +864,7 @@ void Accessors::FunctionLengthGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = GetThisFrom(info);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
MaybeHandle<JSFunction> maybe_function;
{
@@ -932,7 +922,7 @@ void Accessors::FunctionNameGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = GetThisFrom(info);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
MaybeHandle<JSFunction> maybe_function;
{
@@ -1081,7 +1071,7 @@ void Accessors::FunctionArgumentsGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = GetThisFrom(info);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
MaybeHandle<JSFunction> maybe_function;
{
@@ -1220,7 +1210,7 @@ void Accessors::FunctionCallerGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = GetThisFrom(info);
+ Handle<Object> object = Utils::OpenHandle(*info.This());
MaybeHandle<JSFunction> maybe_function;
{
DisallowHeapAllocation no_allocation;
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 0c10a65c21..b26a88217f 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -2297,13 +2297,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (!instr->access().IsExternalMemory() &&
- instr->field_representation().IsHeapObject() &&
- !instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2556,7 +2550,7 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
+ LOperand* index = UseTempRegister(instr->index());
LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
LInstruction* result = DefineSameAsFirst(load);
return AssignPointerMap(result);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 5a01d3bc84..25411fb9ac 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -4076,23 +4076,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ AssertNotSmi(object);
- ASSERT(!(representation.IsSmi() &&
- instr->value()->IsConstantOperand() &&
- !IsSmi(LConstantOperand::cast(instr->value()))));
- if (representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ SmiTst(value);
- DeoptimizeIf(eq, instr->environment());
-
- // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
- } else if (representation.IsDouble()) {
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4134,7 +4123,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -4150,7 +4139,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
}
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 9752622447..0485161a6c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -405,6 +405,11 @@ void MacroAssembler::Store(Register src,
} else if (r.IsInteger16() || r.IsUInteger16()) {
strh(src, dst);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
str(src, dst);
}
}
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 2411b7074d..a0d3c298f1 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -2381,13 +2381,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
temp0 = TempRegister();
}
- LStoreNamedField* result =
- new(zone()) LStoreNamedField(object, value, temp0, temp1);
- if (instr->field_representation().IsHeapObject() &&
- !instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- return result;
+ return new(zone()) LStoreNamedField(object, value, temp0, temp1);
}
@@ -2686,7 +2680,7 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
- LOperand* index = UseRegister(instr->index());
+ LOperand* index = UseRegisterAndClobber(instr->index());
LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
LInstruction* result = DefineSameAsFirst(load);
return AssignPointerMap(result);
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index b064d3da9f..610502a7fd 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -5304,7 +5304,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ Store(value, MemOperand(object, offset), representation);
return;
- } else if (representation.IsDouble()) {
+ }
+
+ __ AssertNotSmi(object);
+
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -5315,19 +5319,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
- SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- ASSERT(!(representation.IsSmi() &&
- instr->value()->IsConstantOperand() &&
- !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
- if (representation.IsHeapObject() &&
- !instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(value, instr->environment());
-
- // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsInteger32Constant(LConstantOperand::cast(instr->value())));
if (instr->hydrogen()->has_transition()) {
Handle<Map> transition = instr->hydrogen()->transition_map();
@@ -5387,7 +5381,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index c5ce99be99..352f3c2ac7 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -557,6 +557,11 @@ void MacroAssembler::Store(const Register& rt,
Str(rt.W(), addr);
} else {
ASSERT(rt.Is64Bits());
+ if (r.IsHeapObject()) {
+ AssertNotSmi(rt);
+ } else if (r.IsSmi()) {
+ AssertSmi(rt);
+ }
Str(rt, addr);
}
}
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index d6cd479181..168a670d2b 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -610,21 +610,21 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
}
-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector, GCTracer* tracer) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- bool next_gc_likely_to_collect_more = false;
+ int freed_nodes = 0;
if (collector == SCAVENGER) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if (!node->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
// Skip dependent handles. Their weak callbacks might expect to be
@@ -640,29 +640,29 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// PostGarbageCollection processing. The current node might
// have been deleted in that round, so we need to bail out (or
// restart the processing).
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!node->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (!it.node()->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!it.node()->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
}
@@ -685,7 +685,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
}
new_space_nodes_.Rewind(last);
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 210c0565c0..34bd8d9ec2 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -155,9 +155,9 @@ class GlobalHandles {
static bool IsWeak(Object** location);
// Process pending weak handles.
- // Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector,
- GCTracer* tracer);
+ // Returns the number of freed nodes.
+ int PostGarbageCollectionProcessing(GarbageCollector collector,
+ GCTracer* tracer);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 13771e613e..ff9c26c40f 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -58,7 +58,6 @@ Heap::Heap()
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
maximum_committed_(0),
- old_space_growing_factor_(4),
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
@@ -86,7 +85,6 @@ Heap::Heap()
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
- size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
@@ -1038,7 +1036,7 @@ bool Heap::PerformGarbageCollection(
GarbageCollector collector,
GCTracer* tracer,
const v8::GCCallbackFlags gc_callback_flags) {
- bool next_gc_likely_to_collect_more = false;
+ int freed_global_handles = 0;
if (collector != SCAVENGER) {
PROFILE(isolate_, CodeMovingGCEvent());
@@ -1081,10 +1079,11 @@ bool Heap::PerformGarbageCollection(
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+ // Temporarily set the limit for case when PostGarbageCollectionProcessing
+ // allocates and triggers GC. The real limit is set at after
+ // PostGarbageCollectionProcessing.
old_generation_allocation_limit_ =
- OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
} else {
@@ -1148,7 +1147,7 @@ bool Heap::PerformGarbageCollection(
gc_post_processing_depth_++;
{ AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- next_gc_likely_to_collect_more =
+ freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, tracer);
}
@@ -1163,6 +1162,9 @@ bool Heap::PerformGarbageCollection(
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
+ old_generation_allocation_limit_ =
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+ freed_global_handles);
}
{ GCCallbacksScope scope(this);
@@ -1181,7 +1183,7 @@ bool Heap::PerformGarbageCollection(
}
#endif
- return next_gc_likely_to_collect_more;
+ return freed_global_handles > 0;
}
@@ -5069,12 +5071,6 @@ bool Heap::ConfigureHeap(int max_semispace_size,
code_range_size_ = code_range_size;
- // We set the old generation growing factor to 2 to grow the heap slower on
- // memory-constrained devices.
- if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
- old_space_growing_factor_ = 2;
- }
-
configured_ = true;
return true;
}
@@ -5146,6 +5142,47 @@ int64_t Heap::PromotedExternalMemorySize() {
}
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles) {
+ const int kMaxHandles = 1000;
+ const int kMinHandles = 100;
+ double min_factor = 1.1;
+ double max_factor = 4;
+ // We set the old generation growing factor to 2 to grow the heap slower on
+ // memory-constrained devices.
+ if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+ max_factor = 2;
+ }
+ // If there are many freed global handles, then the next full GC will
+ // likely collect a lot of garbage. Choose the heap growing factor
+ // depending on freed global handles.
+ // TODO(ulan, hpayer): Take into account mutator utilization.
+ double factor;
+ if (freed_global_handles <= kMinHandles) {
+ factor = max_factor;
+ } else if (freed_global_handles >= kMaxHandles) {
+ factor = min_factor;
+ } else {
+ // Compute factor using linear interpolation between points
+ // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+ factor = max_factor -
+ (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+ (kMaxHandles - kMinHandles);
+ }
+
+ if (FLAG_stress_compaction ||
+ mark_compact_collector()->reduce_memory_footprint_) {
+ factor = min_factor;
+ }
+
+ intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+ limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+ limit += new_space_.Capacity();
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+}
+
+
void Heap::EnableInlineAllocation() {
if (!inline_allocation_disabled_) return;
inline_allocation_disabled_ = false;
@@ -5473,6 +5510,8 @@ void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
Handle<DependentCode> dep) {
ASSERT(!InNewSpace(*obj));
ASSERT(!InNewSpace(*dep));
+ // This handle scope keeps the table handle local to this function, which
+ // allows us to safely skip write barriers in table update operations.
HandleScope scope(isolate());
Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
isolate());
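The new Heap::OldGenerationAllocationLimit in the hunk above implements the
"grow heap slower if GC freed many global handles" item from the commit
message: the growing factor is interpolated linearly between max_factor (few
freed handles) and min_factor (many freed handles), then clamped. A
stand-alone restatement of that arithmetic, useful for sanity-checking the
numbers; the constants mirror the hunk, the sample inputs are made up, and the
stress-compaction / reduce-memory-footprint override is omitted:

    #include <cstdio>

    // Same interpolation as in the OldGenerationAllocationLimit hunk above.
    double GrowingFactor(int freed_global_handles, bool medium_or_low_memory_device) {
      const int kMaxHandles = 1000;
      const int kMinHandles = 100;
      const double min_factor = 1.1;
      const double max_factor = medium_or_low_memory_device ? 2.0 : 4.0;
      if (freed_global_handles <= kMinHandles) return max_factor;
      if (freed_global_handles >= kMaxHandles) return min_factor;
      return max_factor - (freed_global_handles - kMinHandles) *
                              (max_factor - min_factor) /
                              (kMaxHandles - kMinHandles);
    }

    int main() {
      std::printf("%.2f\n", GrowingFactor(50, false));    // 4.00: few freed handles
      std::printf("%.2f\n", GrowingFactor(550, false));   // 2.55: halfway in between
      std::printf("%.2f\n", GrowingFactor(5000, false));  // 1.10: many freed handles
    }

For example, with 550 freed global handles a 100 MB old generation would get
roughly a 255 MB allocation limit, before the minimum-limit, new-space
capacity, and halfway-to-the-max clamps from the hunk are applied.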
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 9c100fcf1f..f88526e255 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -1087,20 +1087,13 @@ class Heap {
static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kLumpOfMemory;
// The executable size has to be a multiple of Page::kPageSize.
- static const int kMaxExecutableSizeLowMemoryDevice = 128 * kLumpOfMemory;
- static const int kMaxExecutableSizeMediumMemoryDevice = 256 * kLumpOfMemory;
- static const int kMaxExecutableSizeHighMemoryDevice = 512 * kLumpOfMemory;
- static const int kMaxExecutableSizeHugeMemoryDevice = 700 * kLumpOfMemory;
+ static const int kMaxExecutableSizeLowMemoryDevice = 96 * kLumpOfMemory;
+ static const int kMaxExecutableSizeMediumMemoryDevice = 192 * kLumpOfMemory;
+ static const int kMaxExecutableSizeHighMemoryDevice = 256 * kLumpOfMemory;
+ static const int kMaxExecutableSizeHugeMemoryDevice = 256 * kLumpOfMemory;
- intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
- intptr_t limit = FLAG_stress_compaction
- ? old_gen_size + old_gen_size / 10
- : old_gen_size * old_space_growing_factor_;
- limit = Max(limit, kMinimumOldGenerationAllocationLimit);
- limit += new_space_.Capacity();
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
+ intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1495,11 +1488,6 @@ class Heap {
intptr_t max_executable_size_;
intptr_t maximum_committed_;
- // The old space growing factor is used in the old space heap growing
- // strategy. The new old space size is the current old space size times
- // old_space_growing_factor_.
- int old_space_growing_factor_;
-
// For keeping track of how much data has survived
// scavenge since last new space expansion.
int survived_since_last_expansion_;
@@ -1572,9 +1560,6 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // Used to adjust the limits that control the timing of the next GC.
- intptr_t size_of_old_gen_at_last_old_space_gc_;
-
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_;
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index 1f3f449a0c..5f45c1fd0d 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -47,10 +47,7 @@ class BoundsCheckKey : public ZoneObject {
} else if (check->index()->IsSub()) {
HSub* index = HSub::cast(check->index());
is_sub = true;
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
+ if (index->right()->IsConstant()) {
constant = HConstant::cast(index->right());
index_base = index->left();
}
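This hydrogen-bce.cc change is the "do not eliminate bounds checks for
'<const> - x'" fix: a check proven for an index of the form x + c or x - c
grows with x, but c - x shrinks as x grows, so keying it on x (as the removed
branch did) could let a later, actually-failing check be eliminated. The
regress-385054.js test added below triggers this with a[x] followed by
a[0 - x]. A toy illustration of why the two index shapes cannot share a check
(plain C++, nothing V8-specific):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> a = {1, 2};
      for (int x : {0, 1}) {
        int forward = x;       // shape "x + const": covered by a check on x
        int backward = 0 - x;  // shape "const - x": for x == 1 this is -1
        std::printf("x=%d  a[x] in bounds: %d  a[0-x] in bounds: %d\n", x,
                    forward >= 0 && forward < static_cast<int>(a.size()),
                    backward >= 0 && backward < static_cast<int>(a.size()));
      }
      return 0;
    }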
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index 701d9654b5..de1e5b0648 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -580,7 +580,7 @@ class HCheckMapsEffects : public ZoneObject {
switch (instr->opcode()) {
case HValue::kStoreNamedField: {
HStoreNamedField* store = HStoreNamedField::cast(instr);
- if (store->access().IsMap() && store->has_transition()) {
+ if (store->access().IsMap() || store->has_transition()) {
objects_.Add(store->object(), zone);
}
break;
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index f5c5c32f40..56ac7196c3 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -873,6 +873,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreKeyed:
+ case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kStringCharFromCode:
@@ -921,7 +922,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStoreContextSlot:
case HValue::kStoreGlobalCell:
case HValue::kStoreKeyedGeneric:
- case HValue::kStoreNamedField:
case HValue::kStringAdd:
case HValue::kStringCompareAndBranch:
case HValue::kSub:
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 1cdca4c46e..9ac3bfb59b 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -6728,6 +6728,12 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
new_space_dominator());
}
+ SmiCheck SmiCheckForWriteBarrier() const {
+ if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
+ if (value()->IsHeapObject()) return OMIT_SMI_CHECK;
+ return INLINE_SMI_CHECK;
+ }
+
Representation field_representation() const {
return access_.representation();
}
@@ -6825,19 +6831,28 @@ class HStoreKeyed V8_FINAL
}
ASSERT_EQ(index, 2);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return RequiredValueRepresentation(elements_kind_, store_mode_);
+ }
+
+ static Representation RequiredValueRepresentation(
+ ElementsKind kind, StoreFieldOrKeyedMode mode) {
+ if (IsDoubleOrFloatElementsKind(kind)) {
return Representation::Double();
}
- if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
+
+ if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() &&
+ mode == STORE_TO_INITIALIZED_ENTRY) {
return Representation::Integer32();
}
- if (IsFastSmiElementsKind(elements_kind())) {
+
+ if (IsFastSmiElementsKind(kind)) {
return Representation::Smi();
}
- return is_external() || is_fixed_typed_array()
- ? Representation::Integer32()
- : Representation::Tagged();
+ return IsExternalArrayElementsKind(kind) ||
+ IsFixedTypedArrayElementsKind(kind)
+ ? Representation::Integer32()
+ : Representation::Tagged();
}
bool is_external() const {
@@ -6857,20 +6872,9 @@ class HStoreKeyed V8_FINAL
if (IsUninitialized()) {
return Representation::None();
}
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
- if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
- return Representation::Integer32();
- }
- if (IsFastSmiElementsKind(elements_kind())) {
- return Representation::Smi();
- }
- if (is_typed_elements()) {
- return Representation::Integer32();
- }
- // For fast object elements kinds, don't assume anything.
- return Representation::None();
+ Representation r = RequiredValueRepresentation(elements_kind_, store_mode_);
+ if (r.IsTagged()) return Representation::None();
+ return r;
}
HValue* elements() { return OperandAt(0); }
@@ -6938,9 +6942,6 @@ class HStoreKeyed V8_FINAL
SetOperandAt(1, key);
SetOperandAt(2, val);
- ASSERT(store_mode != STORE_TO_INITIALIZED_ENTRY ||
- elements_kind == FAST_SMI_ELEMENTS);
-
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
SetDependsOnFlag(kNewSpacePromotion);
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index ee9f8e4167..06dfcfc536 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -2199,6 +2199,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
+ Representation representation = HStoreKeyed::RequiredValueRepresentation(
+ elements_kind, STORE_TO_INITIALIZED_ENTRY);
+ val = AddUncasted<HForceRepresentation>(val, representation);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
is_js_array, access_type);
@@ -2370,9 +2373,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
val = Add<HClampToUint8>(val);
}
return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
- elements_kind == FAST_SMI_ELEMENTS
- ? STORE_TO_INITIALIZED_ENTRY
- : INITIALIZING_STORE);
+ STORE_TO_INITIALIZED_ENTRY);
}
ASSERT(access_type == LOAD);
@@ -5409,16 +5410,13 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
value, STORE_TO_INITIALIZED_ENTRY);
}
} else {
+ if (field_access.representation().IsHeapObject()) {
+ BuildCheckHeapObject(value);
+ }
+
if (!info->field_maps()->is_empty()) {
ASSERT(field_access.representation().IsHeapObject());
- BuildCheckHeapObject(value);
value = Add<HCheckMaps>(value, info->field_maps());
-
- // TODO(bmeurer): This is a dirty hack to avoid repeating the smi check
- // that was already performed by the HCheckHeapObject above in the
- // HStoreNamedField below. We should really do this right instead and
- // make Crankshaft aware of Representation::HeapObject().
- field_access = field_access.WithRepresentation(Representation::Tagged());
}
// This is a normal store.
@@ -8050,10 +8048,12 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HValue* function = Pop(); // f
Drop(1); // apply
+ HValue* checked_function = AddCheckMap(function, function_map);
+
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
- HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
+ HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
HInstruction* result = New<HApplyArguments>(function,
wrapped_receiver,
length,
@@ -8069,7 +8069,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
Push(function);
- Push(BuildWrapReceiver(receiver, function));
+ Push(BuildWrapReceiver(receiver, checked_function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 2872d4dc0d..d2b4f2f7db 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -4366,30 +4366,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- ASSERT(!(representation.IsSmi() &&
- instr->value()->IsConstantOperand() &&
- !IsSmi(LConstantOperand::cast(instr->value()))));
- if (representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- Register value = ToRegister(instr->value());
- __ test(value, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ __ AssertNotSmi(object);
- // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
- }
- } else if (representation.IsDouble()) {
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4462,7 +4444,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
temp,
GetSaveFPRegsMode(isolate()),
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
}
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 3231095adb..8c2687ee9c 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -2419,16 +2419,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- LInstruction* result =
- new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if (!instr->access().IsExternalMemory() &&
- instr->field_representation().IsHeapObject() &&
- (val->IsConstantOperand()
- ? HConstant::cast(instr->value())->HasSmiValue()
- : !instr->value()->type().IsHeapObject())) {
- result = AssignEnvironment(result);
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index f27927de96..8b17baa2a7 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -55,6 +55,11 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
} else if (r.IsInteger16() || r.IsUInteger16()) {
mov_w(dst, src);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
mov(dst, src);
}
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index ec8e941795..784098ba71 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -2634,6 +2634,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearDependentCode(DependentCode::cast(value));
table->set(key_index, heap_->the_hole_value());
table->set(value_index, heap_->the_hole_value());
+ table->ElementRemoved();
}
}
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 95ee3a6c12..fe3e349c2d 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -4073,23 +4073,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ AssertNotSmi(object);
- ASSERT(!(representation.IsSmi() &&
- instr->value()->IsConstantOperand() &&
- !IsSmi(LConstantOperand::cast(instr->value()))));
- if (representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ SmiTst(value, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
-
- // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
- } else if (representation.IsDouble()) {
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4131,7 +4120,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -4147,7 +4136,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier());
}
}
}
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index eb960a4bb2..56e1511943 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -2248,13 +2248,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (!instr->access().IsExternalMemory() &&
- instr->field_representation().IsHeapObject() &&
- !instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2507,7 +2501,7 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
+ LOperand* index = UseTempRegister(instr->index());
LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
LInstruction* result = DefineSameAsFirst(load);
return AssignPointerMap(result);
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 9291e20fa6..62067a26dc 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -56,6 +56,11 @@ void MacroAssembler::Store(Register src,
} else if (r.IsInteger16() || r.IsUInteger16()) {
sh(src, dst);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
sw(src, dst);
}
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 8aa2eb1b4b..8ffd462db1 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2345,6 +2345,18 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
}
+// static
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ const char* reason) {
+ PropertyDetails details =
+ map->instance_descriptors()->GetDetails(modify_index);
+ return CopyGeneralizeAllRepresentations(map, modify_index, store_mode,
+ details.attributes(), reason);
+}
+
+
void Map::DeprecateTransitionTree() {
if (is_deprecated()) return;
if (HasTransitionArray()) {
@@ -2590,8 +2602,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
// Check the state of the root map.
Handle<Map> root_map(old_map->FindRootMap(), isolate);
if (!old_map->EquivalentToForTransition(*root_map)) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- old_details.attributes(), "not equivalent");
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "not equivalent");
}
int root_nof = root_map->NumberOfOwnDescriptors();
if (modify_index < root_nof) {
@@ -2600,8 +2612,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
(old_details.type() == FIELD &&
(!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
!new_representation.fits_into(old_details.representation())))) {
- return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- old_details.attributes(), "root modification");
+ return CopyGeneralizeAllRepresentations(
+ old_map, modify_index, store_mode, "root modification");
}
}
@@ -2623,8 +2635,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
(tmp_type != old_type ||
tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
return CopyGeneralizeAllRepresentations(
- old_map, modify_index, store_mode,
- old_details.attributes(), "incompatible");
+ old_map, modify_index, store_mode, "incompatible");
}
Representation old_representation = old_details.representation();
Representation tmp_representation = tmp_details.representation();
@@ -2688,8 +2699,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
(tmp_details.type() != old_details.type() ||
tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
return CopyGeneralizeAllRepresentations(
- old_map, modify_index, store_mode,
- old_details.attributes(), "incompatible");
+ old_map, modify_index, store_mode, "incompatible");
}
target_map = tmp_map;
}
@@ -2732,6 +2742,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
target_details = target_details.CopyWithRepresentation(
new_representation.generalize(target_details.representation()));
}
+ ASSERT_EQ(old_details.attributes(), target_details.attributes());
if (old_details.type() == FIELD ||
target_details.type() == FIELD ||
(modify_index == i && store_mode == FORCE_FIELD) ||
@@ -3362,16 +3373,9 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
? to_kind
: TERMINAL_FAST_ELEMENTS_KIND;
- // Support for legacy API: SetIndexedPropertiesTo{External,Pixel}Data
- // allows to change elements from arbitrary kind to any ExternalArray
- // elements kind. Satisfy its requirements, checking whether we already
- // have the cached transition.
+ // Support for legacy API.
if (IsExternalArrayElementsKind(to_kind) &&
!IsFixedTypedArrayElementsKind(map->elements_kind())) {
- if (map->HasElementsTransition()) {
- Map* next_map = map->elements_transition_map();
- if (next_map->elements_kind() == to_kind) return next_map;
- }
return map;
}
@@ -12375,8 +12379,17 @@ void DependentCode::AddToDependentICList(Handle<Code> stub) {
DisallowHeapAllocation no_heap_allocation;
GroupStartIndexes starts(this);
int i = starts.at(kWeakICGroup);
- stub->set_next_code_link(object_at(i));
- set_object_at(i, *stub);
+ Object* head = object_at(i);
+ // Try to insert the stub after the head of the list to minimize number of
+ // writes to the DependentCode array, since a write to the array can make it
+ // strong if it was already marked by incremental marker.
+ if (head->IsCode()) {
+ stub->set_next_code_link(Code::cast(head)->next_code_link());
+ Code::cast(head)->set_next_code_link(*stub);
+ } else {
+ stub->set_next_code_link(head);
+ set_object_at(i, *stub);
+ }
}
@@ -16177,7 +16190,10 @@ Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
int entry = table->FindEntry(key);
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
- table->set(EntryToValueIndex(entry), *value);
+ // TODO(ulan): Skipping write barrier is a temporary solution to avoid
+ // memory leaks. Remove this once we have special visitor for weak fixed
+ // arrays.
+ table->set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
return table;
}
@@ -16193,8 +16209,11 @@ void WeakHashTable::AddEntry(int entry,
Handle<Object> key,
Handle<Object> value) {
DisallowHeapAllocation no_allocation;
- set(EntryToIndex(entry), *key);
- set(EntryToValueIndex(entry), *value);
+ // TODO(ulan): Skipping write barrier is a temporary solution to avoid
+ // memory leaks. Remove this once we have special visitor for weak fixed
+ // arrays.
+ set(EntryToIndex(entry), *key, SKIP_WRITE_BARRIER);
+ set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
ElementAdded();
}
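Two parts of the objects.cc diff above work together with the weak-hash-table
changes: WeakHashTable updates now skip the write barrier (the TODOs note this
is temporary), and AddToDependentICList links a new stub in after the existing
head whenever the head is a Code object, so the DependentCode array itself is
not written to; a write to that array under incremental marking could re-mark
it strong, which is the leak the new NoWeakHashTableLeakWithIncrementalMarking
test below guards against. A generic sketch of the two insertion strategies,
using a plain singly linked list rather than V8's Code objects:

    // Illustration only, not V8 types.
    struct Node {
      int payload;
      Node* next = nullptr;
    };

    // Writes to *slot, the external cell owning the list
    // (analogous to set_object_at(i, *stub) in the old code).
    void InsertAtHead(Node** slot, Node* stub) {
      stub->next = *slot;
      *slot = stub;
    }

    // Touches only existing list nodes, leaving the owning cell untouched
    // (analogous to linking the stub after the head Code object).
    void InsertAfterHead(Node* head, Node* stub) {
      stub->next = head->next;
      head->next = stub;
    }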
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 12f2ee0e5c..ab974e3ee4 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -6141,6 +6141,11 @@ class Map: public HeapObject {
StoreMode store_mode,
PropertyAttributes attributes,
const char* reason);
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ const char* reason);
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode);
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index b82d377f71..079332b982 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -10026,7 +10026,7 @@ class ArrayConcatVisitor {
// getters on the arrays increasing the length of later arrays
// during iteration.
// This shouldn't happen in anything but pathological cases.
- SetDictionaryMode(index);
+ SetDictionaryMode();
// Fall-through to dictionary mode.
}
ASSERT(!fast_elements_);
@@ -10047,6 +10047,14 @@ class ArrayConcatVisitor {
} else {
index_offset_ += delta;
}
+ // If the initial length estimate was off (see special case in visit()),
+ // but the array blowing the limit didn't contain elements beyond the
+ // provided-for index range, go to dictionary mode now.
+ if (fast_elements_ &&
+ index_offset_ >
+ static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+ SetDictionaryMode();
+ }
}
bool exceeds_array_limit() {
@@ -10068,7 +10076,7 @@ class ArrayConcatVisitor {
private:
// Convert storage to dictionary mode.
- void SetDictionaryMode(uint32_t index) {
+ void SetDictionaryMode() {
ASSERT(fast_elements_);
Handle<FixedArray> current_storage(*storage_);
Handle<SeededNumberDictionary> slow_storage(
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 8e923af548..fd319ab717 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -156,12 +156,12 @@ int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
}
-void CodeRange::GetNextAllocationBlock(size_t requested) {
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
for (current_allocation_block_index_++;
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
@@ -188,12 +188,12 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
-
+ current_allocation_block_index_ = 0;
// Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+ return false;
}
@@ -203,9 +203,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough. This function call may
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
+ // Find an allocation block large enough.
+ if (!GetNextAllocationBlock(requested_size)) return NULL;
}
// Commit the requested memory at the start of the current allocation block.
size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
@@ -228,7 +227,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
allocation_list_[current_allocation_block_index_].start += *allocated;
allocation_list_[current_allocation_block_index_].size -= *allocated;
if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
+ // This block is used up, get the next one.
+ if (!GetNextAllocationBlock(0)) return NULL;
}
return current.start;
}
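With this spaces.cc change, exhausting the code range is no longer immediately
fatal inside CodeRange: GetNextAllocationBlock reports failure and
AllocateRawMemory returns NULL, which is what allows the "do GC if CodeRange
fails to allocate a block" item to happen at a higher level. That retry is not
part of this file; a hedged sketch of the calling pattern it enables, with
stand-in names rather than the real MemoryAllocator/Heap entry points:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    static bool code_range_exhausted = true;  // toy stand-in for fragmentation

    // Models CodeRange::AllocateRawMemory now returning NULL on failure.
    void* TryAllocateCode(std::size_t size) {
      if (code_range_exhausted) return nullptr;
      return std::malloc(size);
    }

    // Models a full GC that releases dead code objects back to the code range.
    void CollectGarbage() { code_range_exhausted = false; }

    void* AllocateCodeWithRetry(std::size_t size) {
      void* result = TryAllocateCode(size);
      if (result == nullptr) {
        CollectGarbage();                // "Do GC if CodeRange fails to allocate"
        result = TryAllocateCode(size);  // retry once before giving up
      }
      if (result == nullptr) {
        std::fprintf(stderr, "code range exhausted\n");
        std::abort();                    // only now treat it as fatal OOM
      }
      return result;
    }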
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 735f1fbbf2..dd410754b0 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -985,8 +985,8 @@ class CodeRange {
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
- // If none can be found, terminates V8 with FatalProcessOutOfMemory.
- void GetNextAllocationBlock(size_t requested);
+ // If none can be found, returns false.
+ bool GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index ef9faefc83..6bf209bc0a 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -633,18 +633,7 @@ RUNTIME_FUNCTION(StoreInterceptorProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<Name> name = args.at<Name>(1);
Handle<Object> value = args.at<Object>(2);
- if (receiver->IsJSGlobalProxy()) {
- Object* proto = Object::cast(*receiver)->GetPrototype(isolate);
-#ifdef DEBUG
- ASSERT(proto == NULL ||
- JSGlobalObject::cast(proto)->HasNamedInterceptor());
-#endif
- receiver = Handle<JSObject>(JSObject::cast(proto));
- } else {
-#ifdef DEBUG
- ASSERT(receiver->HasNamedInterceptor());
-#endif
- }
+ ASSERT(receiver->HasNamedInterceptor());
PropertyAttributes attr = NONE;
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index c3dc8ac30c..57fbc93186 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -3998,29 +3998,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- SmiCheck check_needed = hinstr->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- ASSERT(!(representation.IsSmi() &&
- instr->value()->IsConstantOperand() &&
- !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
- if (representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- if (!hinstr->value()->type().IsHeapObject()) {
- Register value = ToRegister(instr->value());
- Condition cc = masm()->CheckSmi(value);
- DeoptimizeIf(cc, instr->environment());
+ __ AssertNotSmi(object);
- // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
- }
- } else if (representation.IsDouble()) {
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsInteger32Constant(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
ASSERT(!hinstr->has_transition());
ASSERT(!hinstr->NeedsWriteBarrier());
@@ -4105,7 +4088,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ hinstr->SmiCheckForWriteBarrier());
}
}
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index eb9e7dd00a..a5ef1192e9 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2311,15 +2311,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (!instr->access().IsExternalMemory() &&
- instr->field_representation().IsHeapObject() &&
- (val->IsConstantOperand()
- ? HConstant::cast(instr->value())->HasSmiValue()
- : !instr->value()->type().IsHeapObject())) {
- result = AssignEnvironment(result);
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 17db9bf1a5..832cf52c66 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -924,6 +924,11 @@ void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
movp(dst, src);
}
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 14df05a8e8..d8fa648bf2 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -21341,23 +21341,6 @@ THREADED_TEST(Regress142088) {
}
-THREADED_TEST(Regress3337) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<v8::Object> o1 = Object::New(isolate);
- Local<v8::Object> o2 = Object::New(isolate);
- i::Handle<i::JSObject> io1 = v8::Utils::OpenHandle(*o1);
- i::Handle<i::JSObject> io2 = v8::Utils::OpenHandle(*o2);
- CHECK(io1->map() == io2->map());
- o1->SetIndexedPropertiesToExternalArrayData(
- NULL, v8::kExternalUint32Array, 0);
- o2->SetIndexedPropertiesToExternalArrayData(
- NULL, v8::kExternalUint32Array, 0);
- CHECK(io1->map() == io2->map());
-}
-
-
THREADED_TEST(Regress137496) {
i::FLAG_expose_gc = true;
LocalContext context;
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 913d80a180..3cc61ed5fd 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -40,7 +40,6 @@
using namespace v8::internal;
-
// Go through all incremental marking steps in one swoop.
static void SimulateIncrementalMarking() {
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
@@ -3899,6 +3898,42 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
}
+TEST(NoWeakHashTableLeakWithIncrementalMarking) {
+ if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (!i::FLAG_incremental_marking) return;
+ i::FLAG_weak_embedded_objects_in_optimized_code = true;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_compilation_cache = false;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ for (int i = 0; i < 3; i++) {
+ SimulateIncrementalMarking();
+ {
+ LocalContext context;
+ HandleScope scope(heap->isolate());
+ EmbeddedVector<char, 256> source;
+ OS::SNPrintF(source,
+ "function bar%d() {"
+ " return foo%d(1);"
+ "};"
+ "function foo%d(x) { with (x) { return 1 + x; } };"
+ "bar%d();"
+ "bar%d();"
+ "bar%d();"
+ "%OptimizeFunctionOnNextCall(bar%d);"
+ "bar%d();", i, i, i, i, i, i, i, i);
+ CompileRun(source.start());
+ }
+ heap->CollectAllGarbage(i::Heap::kNoGCFlags);
+ }
+ WeakHashTable* table = WeakHashTable::cast(heap->weak_object_to_code_table());
+ CHECK_EQ(0, table->NumberOfElements());
+}
+
static Handle<JSFunction> OptimizeDummyFunction(const char* name) {
EmbeddedVector<char, 256> source;
diff --git a/deps/v8/test/mjsunit/regress/regress-368243.js b/deps/v8/test/mjsunit/regress/regress-368243.js
new file mode 100644
index 0000000000..6647d12286
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-368243.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a, c){
+ for(var f in c) {
+ if ("object" === typeof c[f]) {
+ a[f] = c[f];
+ foo(a[f], c[f]);
+ }
+ }
+};
+
+c = {
+ "one" : { x : 1},
+ "two" : { x : 2},
+ "thr" : { x : 3, z : 4},
+};
+
+foo({}, c);
+foo({}, c);
+%OptimizeFunctionOnNextCall(foo);
+foo({}, c);
diff --git a/deps/v8/test/mjsunit/regress/regress-385054.js b/deps/v8/test/mjsunit/regress/regress-385054.js
new file mode 100644
index 0000000000..115bca0d21
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-385054.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ var a = [1, 2];
+ a[x];
+ return a[0 - x];
+}
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-386034.js b/deps/v8/test/mjsunit/regress/regress-386034.js
new file mode 100644
index 0000000000..d770ce91bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-386034.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ var v = x;
+ for (i = 0; i < 1; i++) {
+ v.apply(this, arguments);
+ }
+}
+
+function g() {}
+
+f(g);
+f(g);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(function() { f('----'); }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-382143.js b/deps/v8/test/mjsunit/regress/regress-crbug-382143.js
new file mode 100644
index 0000000000..9f37b2e478
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-382143.js
@@ -0,0 +1,16 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function A() {
+ Object.defineProperty(this, "x", { set: function () {}, get: function () {}});
+ this.a = function () { return 1; }
+}
+
+function B() {
+ A.apply( this );
+ this.a = function () { return 2; }
+}
+
+var b = new B();
+assertTrue(Object.getOwnPropertyDescriptor(b, "a").enumerable);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-387031.js b/deps/v8/test/mjsunit/regress/regress-crbug-387031.js
new file mode 100644
index 0000000000..77f52a9d35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-387031.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+a = [1];
+b = [];
+a.__defineGetter__(0, function () {
+ b.length = 0xffffffff;
+});
+c = a.concat(b);
+for (var i = 0; i < 20; i++) {
+ assertEquals(undefined, (c[i]));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-grow-deopt.js b/deps/v8/test/mjsunit/regress/regress-grow-deopt.js
new file mode 100644
index 0000000000..df3a83fe8b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-grow-deopt.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a, v) {
+ a[a.length] = v;
+}
+
+var a = [1.4];
+f(a, 1);
+f(a, 2);
+%OptimizeFunctionOnNextCall(f);
+f(a, {});
+assertEquals(4, a.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js b/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js
new file mode 100644
index 0000000000..d2cb44381d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --track-field-types --use-gvn
+
+function A(id) {
+ this.id = id;
+}
+
+var a1 = new A(1);
+var a2 = new A(2);
+
+
+var g;
+function f(o, value) {
+ g = o.o;
+ o.o = value;
+ return o.o;
+}
+
+var obj = {o: a1};
+
+f(obj, a1);
+f(obj, a1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(a2.id, f(obj, a2).id);
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 2ce085393e..fff2e34b7c 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -80,28 +80,14 @@ consts_misc = [
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
- { 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
- { 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
- { 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
- { 'name': 'OddballNull', 'value': 'Oddball::kNull' },
- { 'name': 'OddballArgumentMarker', 'value': 'Oddball::kArgumentMarker' },
- { 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
- { 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
- { 'name': 'OddballOther', 'value': 'Oddball::kOther' },
- { 'name': 'OddballException', 'value': 'Oddball::kException' },
-
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'FIELD' },
{ 'name': 'prop_type_first_phantom',
- 'value': 'INTERCEPTOR' },
+ 'value': 'TRANSITION' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
- { 'name': 'prop_index_mask',
- 'value': 'PropertyDetails::FieldIndexField::kMask' },
- { 'name': 'prop_index_shift',
- 'value': 'PropertyDetails::FieldIndexField::kShift' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
@@ -112,20 +98,6 @@ consts_misc = [
{ 'name': 'prop_desc_size',
'value': 'DescriptorArray::kDescriptorSize' },
- { 'name': 'bit_field2_elements_kind_mask',
- 'value': 'Map::kElementsKindMask' },
- { 'name': 'bit_field2_elements_kind_shift',
- 'value': 'Map::kElementsKindShift' },
- { 'name': 'bit_field3_dictionary_map_shift',
- 'value': 'Map::DictionaryMap::kShift' },
-
- { 'name': 'elements_fast_holey_elements',
- 'value': 'FAST_HOLEY_ELEMENTS' },
- { 'name': 'elements_fast_elements',
- 'value': 'FAST_ELEMENTS' },
- { 'name': 'elements_dictionary_elements',
- 'value': 'DICTIONARY_ELEMENTS' },
-
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_constant_pool',
@@ -148,16 +120,6 @@ extras_accessors = [
'Map, instance_attributes, int, kInstanceAttributesOffset',
'Map, inobject_properties, int, kInObjectPropertiesOffset',
'Map, instance_size, int, kInstanceSizeOffset',
- 'Map, bit_field, char, kBitFieldOffset',
- 'Map, bit_field2, char, kBitField2Offset',
- 'Map, bit_field3, SMI, kBitField3Offset',
- 'Map, prototype, Object, kPrototypeOffset',
- 'NameDictionaryShape, prefix_size, int, kPrefixSize',
- 'NameDictionaryShape, entry_size, int, kEntrySize',
- 'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
- 'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
- 'NumberDictionaryShape, entry_size, int, kEntrySize',
- 'Oddball, kind_offset, int, kKindOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
'ConsString, second, String, kSecondOffset',
@@ -399,7 +361,7 @@ def parse_field(call):
'value': '%s::%s' % (klass, offset)
});
- assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI');
+ assert(kind == 'SMI_ACCESSORS');
klass = args[0];
field = args[1];
offset = args[2];
@@ -423,8 +385,7 @@ def load_fields():
# may span multiple lines and may contain nested parentheses. We also
# call parse_field() to pick apart the invocation.
#
- prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
- 'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
+ prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE', 'SMI_ACCESSORS' ];
current = '';
opens = 0;