summary refs log tree commit diff
path: root/deps/v8/src
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc27
-rw-r--r--deps/v8/src/compiler/heap-refs.h1
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc11
-rw-r--r--deps/v8/src/compiler/map-inference.cc7
-rw-r--r--deps/v8/src/compiler/map-inference.h1
5 files changed, 31 insertions, 16 deletions
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 12270495c1..d6346fb9aa 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -121,18 +121,19 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
TNode<Code> centry = LoadCEntryFromInstance(instance);
TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+ TNode<Object> context = LoadContextFromInstance(instance);
// TODO(aseemgarg): Use SMIs if possible for address and count
TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
TNode<HeapNumber> count_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(count_heap, ChangeUint32ToFloat64(count));
TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
- Runtime::kWasmAtomicNotify, centry, NoContextConstant(), instance,
+ Runtime::kWasmAtomicNotify, centry, context, instance,
address_heap, count_heap));
ReturnRaw(SmiToInt32(result_smi));
}
@@ -149,23 +150,24 @@ TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
TNode<Code> centry = LoadCEntryFromInstance(instance);
TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+ TNode<Object> context = LoadContextFromInstance(instance);
// TODO(aseemgarg): Use SMIs if possible for address and expected_value
TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
TNode<HeapNumber> expected_value_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(expected_value_heap,
ChangeInt32ToFloat64(expected_value));
TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(timeout_heap, timeout);
TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
- Runtime::kWasmI32AtomicWait, centry, NoContextConstant(), instance,
+ Runtime::kWasmI32AtomicWait, centry, context, instance,
address_heap, expected_value_heap, timeout_heap));
ReturnRaw(SmiToInt32(result_smi));
}
@@ -184,28 +186,29 @@ TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
TNode<Code> centry = LoadCEntryFromInstance(instance);
TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber);
+ TNode<Object> context = LoadContextFromInstance(instance);
// TODO(aseemgarg): Use SMIs if possible for address and expected_value
TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address));
TNode<HeapNumber> expected_value_high_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(expected_value_high_heap,
ChangeUint32ToFloat64(expected_value_high));
TNode<HeapNumber> expected_value_low_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(expected_value_low_heap,
ChangeUint32ToFloat64(expected_value_low));
TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>(
- CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()));
+ CallStub(AllocateHeapNumberDescriptor(), target, context));
StoreHeapNumberValue(timeout_heap, timeout);
TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry(
- Runtime::kWasmI64AtomicWait, centry, NoContextConstant(), instance,
+ Runtime::kWasmI64AtomicWait, centry, context, instance,
address_heap, expected_value_high_heap, expected_value_low_heap,
timeout_heap));
ReturnRaw(SmiToInt32(result_smi));
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index c6322ebe69..f08e49832e 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -389,6 +389,7 @@ class ContextRef : public HeapObjectRef {
V(JSFunction, object_function) \
V(JSFunction, promise_function) \
V(JSFunction, promise_then) \
+ V(JSFunction, regexp_function) \
V(JSFunction, string_function) \
V(JSFunction, symbol_function) \
V(JSGlobalObject, global_object) \
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d400fa2673..b86b1e6baf 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -7098,11 +7098,14 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* regexp = NodeProperties::GetValueInput(node, 1);
+ // Only the initial JSRegExp map is valid here, since the following lastIndex
+ // check as well as the lowered builtin call rely on a known location of the
+ // lastIndex field.
+ Handle<Map> regexp_initial_map =
+ native_context().regexp_function().initial_map().object();
+
MapInference inference(broker(), regexp, effect);
- if (!inference.HaveMaps() ||
- !inference.AllOfInstanceTypes(InstanceTypeChecker::IsJSRegExp)) {
- return inference.NoChange();
- }
+ if (!inference.Is(regexp_initial_map)) return inference.NoChange();
MapHandles const& regexp_maps = inference.GetMaps();
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index 1e2434f4ae..6ce036aa0b 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -91,6 +91,13 @@ MapHandles const& MapInference::GetMaps() {
return maps_;
}
+bool MapInference::Is(Handle<Map> expected_map) {
+ if (!HaveMaps()) return false;
+ const MapHandles& maps = GetMaps();
+ if (maps.size() != 1) return false;
+ return maps[0].equals(expected_map);
+}
+
void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect,
Node* control,
const FeedbackSource& feedback) {
diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h
index acba2eb0f2..498b6bc15e 100644
--- a/deps/v8/src/compiler/map-inference.h
+++ b/deps/v8/src/compiler/map-inference.h
@@ -55,6 +55,7 @@ class MapInference {
V8_WARN_UNUSED_RESULT MapHandles const& GetMaps();
V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes(
std::function<bool(InstanceType)> f);
+ V8_WARN_UNUSED_RESULT bool Is(Handle<Map> expected_map);
// These methods provide a guard.
//