summaryrefslogtreecommitdiff
path: root/deps/v8/src
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2018-10-24 15:30:26 +0200
committerMichaël Zasso <targos@protonmail.com>2018-10-28 10:17:58 +0100
commit124d91667abb203098baa7d6cf6011e8df5eeeff (patch)
tree105c8717e6a97f67656b3744863a1b8b310c74ce /deps/v8/src
parent1b473542dc54cbf6185c12021076d5c7626046ea (diff)
downloadandroid-node-v8-124d91667abb203098baa7d6cf6011e8df5eeeff.tar.gz
android-node-v8-124d91667abb203098baa7d6cf6011e8df5eeeff.tar.bz2
android-node-v8-124d91667abb203098baa7d6cf6011e8df5eeeff.zip
deps: patch V8 to 7.0.276.32
Refs: https://github.com/v8/v8/compare/7.0.276.28...7.0.276.32 PR-URL: https://github.com/nodejs/node/pull/23851 Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com> Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Refael Ackermann <refack@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/debug/debug-coverage.cc39
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc7
-rw-r--r--deps/v8/src/debug/debug.cc15
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc13
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h2
-rw-r--r--deps/v8/src/value-serializer.cc14
6 files changed, 72 insertions, 18 deletions
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index a71c9e572b..f8b716f7c9 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -171,6 +171,12 @@ class CoverageBlockIterator final {
return function_->blocks[read_index_ + 1];
}
+ CoverageBlock& GetPreviousBlock() {
+ DCHECK(IsActive());
+ DCHECK_GT(read_index_, 0);
+ return function_->blocks[read_index_ - 1];
+ }
+
CoverageBlock& GetParent() {
DCHECK(IsActive());
return nesting_stack_.back();
@@ -325,6 +331,30 @@ void MergeNestedRanges(CoverageFunction* function) {
}
}
+void FilterAliasedSingletons(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ iter.Next(); // Advance once since we reference the previous block later.
+
+ while (iter.Next()) {
+ CoverageBlock& previous_block = iter.GetPreviousBlock();
+ CoverageBlock& block = iter.GetBlock();
+
+ bool is_singleton = block.end == kNoSourcePosition;
+ bool aliases_start = block.start == previous_block.start;
+
+ if (is_singleton && aliases_start) {
+ // The previous block must have a full range since duplicate singletons
+ // have already been merged.
+ DCHECK_NE(previous_block.end, kNoSourcePosition);
+ // Likewise, the next block must have another start position since
+ // singletons are sorted to the end.
+ DCHECK_IMPLIES(iter.HasNext(), iter.GetNextBlock().start != block.start);
+ iter.DeleteBlock();
+ }
+ }
+}
+
void FilterUncoveredRanges(CoverageFunction* function) {
CoverageBlockIterator iter(function);
@@ -397,6 +427,15 @@ void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo* info,
// Remove duplicate singleton ranges, keeping the max count.
MergeDuplicateSingletons(function);
+ // Remove singleton ranges with the same start position as a full range and
+ // throw away their counts.
+ // Singleton ranges are only intended to split existing full ranges and should
+ // never expand into a full range. Consider 'if (cond) { ... } else { ... }'
+ // as a problematic example; if the then-block produces a continuation
+ // singleton, it would incorrectly expand into the else range.
+ // For more context, see https://crbug.com/v8/8237.
+ FilterAliasedSingletons(function);
+
// Rewrite all singletons (created e.g. by continuations and unconditional
// control flow) to ranges.
RewritePositionSingletonsToRanges(function);
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 5466ca050b..583b41f1b2 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -995,12 +995,7 @@ void DebugEvaluate::ApplySideEffectChecks(
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
it.Advance()) {
interpreter::Bytecode bytecode = it.current_bytecode();
- if (BytecodeRequiresRuntimeCheck(bytecode)) {
- interpreter::Bytecode debugbreak =
- interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array->set(it.current_offset(),
- interpreter::Bytecodes::ToByte(debugbreak));
- }
+ if (BytecodeRequiresRuntimeCheck(bytecode)) it.ApplyDebugBreak();
}
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index a7114b1434..3a3a48b699 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -279,15 +279,12 @@ void BreakIterator::SkipToPosition(int position) {
void BreakIterator::SetDebugBreak() {
DebugBreakType debug_break_type = GetDebugBreakType();
if (debug_break_type == DEBUGGER_STATEMENT) return;
+ HandleScope scope(isolate());
DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
- BytecodeArray* bytecode_array = debug_info_->DebugBytecodeArray();
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
- if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
- interpreter::Bytecode debugbreak =
- interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array->set(code_offset(),
- interpreter::Bytecodes::ToByte(debugbreak));
+ Handle<BytecodeArray> bytecode_array(debug_info_->DebugBytecodeArray(),
+ isolate());
+ interpreter::BytecodeArrayAccessor(bytecode_array, code_offset())
+ .ApplyDebugBreak();
}
void BreakIterator::ClearDebugBreak() {
@@ -2127,6 +2124,8 @@ void Debug::ClearSideEffectChecks(Handle<DebugInfo> debug_info) {
Handle<BytecodeArray> original(debug_info->OriginalBytecodeArray(), isolate_);
for (interpreter::BytecodeArrayIterator it(debug_bytecode); !it.done();
it.Advance()) {
+    // Restore from original. This may copy only the scaling prefix, which is
+    // correct, since we patch scaling prefixes to debug breaks if they exist.
debug_bytecode->set(it.current_offset(),
original->get(it.current_offset()));
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index ef6bdd30a1..3ec2cc595b 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -28,6 +28,19 @@ void BytecodeArrayAccessor::SetOffset(int offset) {
UpdateOperandScale();
}
+void BytecodeArrayAccessor::ApplyDebugBreak() {
+ // Get the raw bytecode from the bytecode array. This may give us a
+ // scaling prefix, which we can patch with the matching debug-break
+ // variant.
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array_->get(bytecode_offset_));
+ if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
+ interpreter::Bytecode debugbreak =
+ interpreter::Bytecodes::GetDebugBreak(bytecode);
+ bytecode_array_->set(bytecode_offset_,
+ interpreter::Bytecodes::ToByte(debugbreak));
+}
+
void BytecodeArrayAccessor::UpdateOperandScale() {
if (OffsetInBounds()) {
uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 443929aefe..e36eed8ade 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -70,6 +70,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
void SetOffset(int offset);
+ void ApplyDebugBreak();
+
Bytecode current_bytecode() const;
int current_bytecode_size() const;
int current_offset() const { return bytecode_offset_; }
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 26ab746e8c..0633d19a2a 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -517,12 +517,14 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
- case WASM_MODULE_TYPE:
- if (!FLAG_wasm_disable_structured_cloning) {
+ case WASM_MODULE_TYPE: {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
// Only write WebAssembly modules if not disabled by a flag.
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
}
break;
+ }
case WASM_MEMORY_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (enabled_features.threads) {
@@ -1753,7 +1755,9 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
- if (FLAG_wasm_disable_structured_cloning || expect_inline_wasm()) {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}
@@ -1775,7 +1779,9 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
- if (FLAG_wasm_disable_structured_cloning || !expect_inline_wasm()) {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
+ !expect_inline_wasm()) {
return MaybeHandle<JSObject>();
}