author    Michaël Zasso <targos@protonmail.com>  2019-11-08 15:39:11 +0100
committer Michaël Zasso <targos@protonmail.com>  2019-11-08 15:46:25 +0100
commit    6ca81ad72a3c6fdf16c683335be748f22aaa9a0d (patch)
tree      33c8ee75f729aed76c2c0b89c63f9bf1b4dd66aa /deps/v8/src/interpreter
parent    1eee0b8bf8bba39b600fb16a9223e545e3bac2bc (diff)
deps: update V8 to 7.9.317.20
PR-URL: https://github.com/nodejs/node/pull/30020
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.cc            2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h             2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc            13
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h              7
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-iterator.h             2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-random-iterator.h      2
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc               276
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h                 11
-rw-r--r--  deps/v8/src/interpreter/bytecodes.cc                          1
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h                           3
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc             2
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc            592
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h             274
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc            601
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.h               4
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc 211
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.h    6
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc                        8
18 files changed, 967 insertions, 1050 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index d460c1a45f..0690e16aa9 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -66,7 +66,7 @@ BytecodeArrayAccessor::BytecodeArrayAccessor(
BytecodeArrayAccessor::BytecodeArrayAccessor(
Handle<BytecodeArray> bytecode_array, int initial_offset)
: BytecodeArrayAccessor(
- base::make_unique<OnHeapBytecodeArray>(bytecode_array),
+ std::make_unique<OnHeapBytecodeArray>(bytecode_array),
initial_offset) {}
void BytecodeArrayAccessor::SetOffset(int offset) {
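This hunk is part of the broader migration in this update from V8's hand-rolled base::make_unique to the standard C++14 std::make_unique, which is also why the headers below gain an explicit <memory> include. A minimal standalone sketch of the pattern (OnHeapBytecodeArray is stood in for by a plain struct):

    #include <memory>

    struct OnHeapArray {
      explicit OnHeapArray(int length) : length_(length) {}
      int length_;
    };

    int main() {
      // std::make_unique constructs the object and wraps it in a
      // std::unique_ptr in one expression, exactly what the removed
      // base::make_unique emulated before C++14 was available.
      std::unique_ptr<OnHeapArray> array = std::make_unique<OnHeapArray>(16);
      return array->length_ == 16 ? 0 : 1;
    }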
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 97278af7bd..92d0da6607 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#include <memory>
+
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index cfc3eb36c1..1c61776cdf 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -824,9 +824,16 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(Register object,
- int feedback_slot) {
- OutputGetIterator(object, feedback_slot);
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
+ Register object, int feedback_slot) {
+ size_t name_index = IteratorSymbolConstantPoolEntry();
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(
+ Register object, int load_feedback_slot, int call_feedback_slot) {
+ OutputGetIterator(object, load_feedback_slot, call_feedback_slot);
return *this;
}
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 06230f9270..39cd4fa6f6 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -135,7 +135,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
// Named load property of the @@iterator symbol.
- BytecodeArrayBuilder& GetIterator(Register object, int feedback_slot);
+ BytecodeArrayBuilder& LoadIteratorProperty(Register object,
+ int feedback_slot);
+
+ // Load and call property of the @@iterator symbol
+ BytecodeArrayBuilder& GetIterator(Register object, int load_feedback_slot,
+ int call_feedback_slot);
// Named load property of the @@asyncIterator symbol.
BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object,
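The builder API change above splits the old single-slot GetIterator into two entry points: LoadIteratorProperty, which only emits the named load of @@iterator, and a widened GetIterator that takes both a load and a call feedback slot, because the bytecode now also invokes the method and validates the result (see the bytecode-generator.cc and bytecodes.h hunks below). A hedged C++ sketch of the fused semantics, using a toy object model in which every name is a hypothetical stand-in:

    #include <functional>
    #include <map>
    #include <stdexcept>
    #include <string>

    struct Object {
      std::map<std::string, std::function<Object*(Object*)>> methods;
    };

    // What the widened GetIterator bytecode performs in one step:
    //   method   = GetMethod(obj, @@iterator)   -- load feedback slot
    //   iterator = Call(method, obj)            -- call feedback slot
    //   if Type(iterator) is not Object, throw SymbolIteratorInvalid.
    Object* GetIterator(Object* obj) {
      auto it = obj->methods.find("@@iterator");
      if (it == obj->methods.end())
        throw std::runtime_error("SymbolIteratorInvalid");
      Object* iterator = it->second(obj);
      if (iterator == nullptr)  // stands in for "not a JSReceiver"
        throw std::runtime_error("SymbolIteratorInvalid");
      return iterator;
    }

    int main() {
      Object iter, obj;
      obj.methods["@@iterator"] = [&iter](Object*) { return &iter; };
      return GetIterator(&obj) == &iter ? 0 : 1;
    }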
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index e6b58deadc..b992ffc037 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#include <memory>
+
#include "src/interpreter/bytecode-array-accessor.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index a3b69b7015..68905a146c 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+#include <memory>
+
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 29065d6a55..92ae15127e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -2042,7 +2042,71 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
VisitDeclarations(expr->scope()->declarations());
Register class_constructor = register_allocator()->NewRegister();
+ // Create the class brand symbol and store it on the context during class
+ // evaluation. This will be stored in the instance later in the constructor.
+ // We do this early so that invalid access to private methods or accessors
+ // in computed property keys throw.
+ if (expr->scope()->brand() != nullptr) {
+ Register brand = register_allocator()->NewRegister();
+ const AstRawString* class_name =
+ expr->scope()->class_variable() != nullptr
+ ? expr->scope()->class_variable()->raw_name()
+ : ast_string_constants()->empty_string();
+ builder()
+ ->LoadLiteral(class_name)
+ .StoreAccumulatorInRegister(brand)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
+ BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
+
AccessorTable<ClassLiteral::Property> private_accessors(zone());
+ for (int i = 0; i < expr->private_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->private_members()->at(i);
+ DCHECK(property->is_private());
+ switch (property->kind()) {
+ case ClassLiteral::Property::FIELD: {
+ // Initialize the private field variables early.
+ // Create the private name symbols for fields during class
+ // evaluation and store them on the context. These will be
+ // used as keys later during instance or static initialization.
+ RegisterAllocationScope private_name_register_scope(this);
+ Register private_name = register_allocator()->NewRegister();
+ VisitForRegisterValue(property->key(), private_name);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+ .StoreAccumulatorInRegister(private_name)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+ DCHECK_NOT_NULL(property->private_name_var());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ break;
+ }
+ case ClassLiteral::Property::METHOD: {
+ // We can initialize the private methods and accessors later so that the
+ // home objects can be assigned right after the creation of the
+ // closures, and those are guarded by the brand checks.
+ break;
+ }
+ // Collect private accessors into a table to merge the creation of
+ // those closures later.
+ case ClassLiteral::Property::GETTER: {
+ Literal* key = property->key()->AsLiteral();
+ DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
+ private_accessors.LookupOrInsert(key)->getter = property;
+ break;
+ }
+ case ClassLiteral::Property::SETTER: {
+ Literal* key = property->key()->AsLiteral();
+ DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
+ private_accessors.LookupOrInsert(key)->setter = property;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
{
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewGrowableRegisterList();
@@ -2065,8 +2129,8 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
.StoreAccumulatorInRegister(class_boilerplate);
// Create computed names and method values nodes to store into the literal.
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
+ for (int i = 0; i < expr->public_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->public_members()->at(i);
if (property->is_computed_name()) {
Register key = register_allocator()->GrowRegisterList(&args);
@@ -2099,50 +2163,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
}
}
- if (property->is_private()) {
- // Assign private class member's name variables.
- switch (property->kind()) {
- case ClassLiteral::Property::FIELD: {
- // Create the private name symbols for fields during class
- // evaluation and store them on the context. These will be
- // used as keys later during instance or static initialization.
- RegisterAllocationScope private_name_register_scope(this);
- Register private_name = register_allocator()->NewRegister();
- VisitForRegisterValue(property->key(), private_name);
- builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
- .StoreAccumulatorInRegister(private_name)
- .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
- DCHECK_NOT_NULL(property->private_name_var());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- break;
- }
- case ClassLiteral::Property::METHOD: {
- // Create the closures for private methods.
- VisitForAccumulatorValue(property->value());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- break;
- }
- case ClassLiteral::Property::GETTER: {
- Literal* key = property->key()->AsLiteral();
- DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter);
- private_accessors.LookupOrInsert(key)->getter = property;
- break;
- }
- case ClassLiteral::Property::SETTER: {
- Literal* key = property->key()->AsLiteral();
- DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter);
- private_accessors.LookupOrInsert(key)->setter = property;
- break;
- }
- }
- // The private fields are initialized in the initializer function and
- // the private brand for the private methods are initialized in the
- // constructor instead.
- continue;
- }
+ DCHECK(!property->is_private());
if (property->kind() == ClassLiteral::Property::FIELD) {
// We don't compute field's value here, but instead do it in the
@@ -2160,60 +2181,55 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
builder()->StoreAccumulatorInRegister(prototype);
// Assign to class variable.
- if (expr->class_variable() != nullptr) {
- DCHECK(expr->class_variable()->IsStackLocal() ||
- expr->class_variable()->IsContextSlot());
+ Variable* class_variable = expr->scope()->class_variable();
+ if (class_variable != nullptr && class_variable->is_used()) {
+ DCHECK(class_variable->IsStackLocal() || class_variable->IsContextSlot());
builder()->LoadAccumulatorWithRegister(class_constructor);
- BuildVariableAssignment(expr->class_variable(), Token::INIT,
+ BuildVariableAssignment(class_variable, Token::INIT,
HoleCheckMode::kElided);
}
- // Create the class brand symbol and store it on the context
- // during class evaluation. This will be stored in the
- // receiver later in the constructor.
- if (expr->scope()->brand() != nullptr) {
- Register brand = register_allocator()->NewRegister();
- const AstRawString* class_name =
- expr->class_variable() != nullptr
- ? expr->class_variable()->raw_name()
- : ast_string_constants()->empty_string();
- builder()
- ->LoadLiteral(class_name)
- .StoreAccumulatorInRegister(brand)
- .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
- BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
- HoleCheckMode::kElided);
-
- // Store the home object for any private methods that need
- // them. We do this here once the prototype and brand symbol has
- // been created. Private accessors have their home object set later
- // when they are defined.
- for (int i = 0; i < expr->properties()->length(); i++) {
+ // Create the closures of private methods, and store the home object for
+ // any private methods that need them.
+ if (expr->has_private_methods()) {
+ for (int i = 0; i < expr->private_members()->length(); i++) {
+ ClassLiteral::Property* property = expr->private_members()->at(i);
+ if (property->kind() != ClassLiteral::Property::METHOD) {
+ continue;
+ }
RegisterAllocationScope register_scope(this);
- ClassLiteral::Property* property = expr->properties()->at(i);
+ VisitForAccumulatorValue(property->value());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ Register home_object = property->private_name_var()->is_static()
+ ? class_constructor
+ : prototype;
if (property->NeedsHomeObjectOnClassPrototype()) {
Register func = register_allocator()->NewRegister();
- BuildVariableLoad(property->private_name_var(), HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(func);
- VisitSetHomeObject(func, prototype, property);
+ VisitSetHomeObject(func, home_object, property);
}
}
+ }
- // Define accessors, using only a single call to the runtime for each pair
- // of corresponding getters and setters.
- for (auto accessors : private_accessors.ordered_accessors()) {
- RegisterAllocationScope inner_register_scope(this);
- RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
- ClassLiteral::Property* getter = accessors.second->getter;
- ClassLiteral::Property* setter = accessors.second->setter;
- VisitLiteralAccessor(prototype, getter, accessors_reg[0]);
- VisitLiteralAccessor(prototype, setter, accessors_reg[1]);
- builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
- Variable* var = getter != nullptr ? getter->private_name_var()
- : setter->private_name_var();
- DCHECK_NOT_NULL(var);
- BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
- }
+ // Define private accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters, in the order the first
+ // component is declared. Store the home objects if necessary.
+ for (auto accessors : private_accessors.ordered_accessors()) {
+ RegisterAllocationScope inner_register_scope(this);
+ RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
+ ClassLiteral::Property* getter = accessors.second->getter;
+ ClassLiteral::Property* setter = accessors.second->setter;
+ bool is_static =
+ getter != nullptr ? getter->is_static() : setter->is_static();
+ Register home_object = is_static ? class_constructor : prototype;
+ VisitLiteralAccessor(home_object, getter, accessors_reg[0]);
+ VisitLiteralAccessor(home_object, setter, accessors_reg[1]);
+ builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
+ Variable* var = getter != nullptr ? getter->private_name_var()
+ : setter->private_name_var();
+ DCHECK_NOT_NULL(var);
+ BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided);
}
if (expr->instance_members_initializer_function() != nullptr) {
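The loop above installs each getter/setter pair with a single kCreatePrivateAccessors call by first collecting the properties into an AccessorTable keyed on the name literal. A minimal sketch of that collect-then-merge pattern (the real table preserves the declaration order of the first component; a std::map is used here purely for brevity):

    #include <cstdio>
    #include <map>
    #include <string>

    struct AccessorPair {
      const char* getter = nullptr;  // stand-ins for compiled closures
      const char* setter = nullptr;
    };

    int main() {
      std::map<std::string, AccessorPair> table;
      table["#x"].getter = "get_x";
      table["#x"].setter = "set_x";  // merged into the same entry
      table["#y"].getter = "get_y";  // a lone getter still forms a pair
      // One "runtime call" per completed pair instead of one per accessor.
      for (const auto& entry : table) {
        const AccessorPair& pair = entry.second;
        std::printf("CreatePrivateAccessors(%s: %s, %s)\n",
                    entry.first.c_str(),
                    pair.getter ? pair.getter : "null",
                    pair.setter ? pair.setter : "null");
      }
      return 0;
    }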
@@ -3086,7 +3102,8 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
.StoreAccumulatorInRegister(args[2]) // done
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
- DCHECK(IsAsyncFunction(info()->literal()->kind()));
+ DCHECK(IsAsyncFunction(info()->literal()->kind()) ||
+ IsAsyncModule(info()->literal()->kind()));
RegisterList args = register_allocator()->NewRegisterList(3);
builder()
->MoveRegister(generator_object(), args[0]) // generator
@@ -3921,7 +3938,8 @@ void BytecodeGenerator::BuildAssignment(
Property* property = lhs_data.expr()->AsProperty();
Register object = VisitForRegisterValue(property->obj());
Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object);
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberWrite);
BuildPrivateSetterAccess(object, key, value);
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
@@ -4004,6 +4022,12 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// in the accumulator. When the generator is resumed, the sent value is loaded
// in the accumulator.
void BytecodeGenerator::BuildSuspendPoint(int position) {
+ // Because we eliminate jump targets in dead code, we also eliminate resumes
+ // when the suspend is not emitted because otherwise the below call to Bind
+ // would start a new basic block and the code would be considered alive.
+ if (builder()->RemainderOfBlockIsDead()) {
+ return;
+ }
const int suspend_id = suspend_count_++;
RegisterList registers = register_allocator()->AllLiveRegisters();
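The early return added above keeps dead code dead: binding the resume label of a suspend point starts a new basic block, which would make everything after an unconditional throw or return reachable again. A small emitter sketch of the guard, with RemainderOfBlockIsDead modeled as a plain flag:

    #include <cstdio>
    #include <vector>

    struct Emitter {
      bool block_dead = false;  // models RemainderOfBlockIsDead()
      std::vector<const char*> code;
      void Emit(const char* op) {
        if (!block_dead) code.push_back(op);
      }
      void EmitSuspendPoint() {
        // Skip the whole suspend/resume pair: emitting the resume label
        // alone would open a live basic block inside dead code.
        if (block_dead) return;
        code.push_back("SuspendGenerator");
        code.push_back("ResumeLabel");
      }
    };

    int main() {
      Emitter e;
      e.Emit("Throw");
      e.block_dead = true;
      e.EmitSuspendPoint();                     // emits nothing
      std::printf("%zu ops\n", e.code.size());  // prints "1 ops"
      return 0;
    }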
@@ -4454,12 +4478,14 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
case PRIVATE_GETTER_ONLY:
case PRIVATE_GETTER_AND_SETTER: {
Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, obj);
+ BuildPrivateBrandCheck(property, obj,
+ MessageTemplate::kInvalidPrivateMemberRead);
BuildPrivateGetterAccess(obj, key);
break;
}
case PRIVATE_METHOD: {
- BuildPrivateBrandCheck(property, obj);
+ BuildPrivateBrandCheck(property, obj,
+ MessageTemplate::kInvalidPrivateMemberRead);
// In the case of private methods, property->key() is the function to be
// loaded (stored in a context slot), so load this directly.
VisitForAccumulatorValue(property->key());
@@ -4499,15 +4525,29 @@ void BytecodeGenerator::BuildPrivateSetterAccess(Register object,
}
void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
- Register object) {
+ Register object,
+ MessageTemplate tmpl) {
Variable* private_name = property->key()->AsVariableProxy()->var();
- DCHECK(private_name->requires_brand_check());
+ DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
ClassScope* scope = private_name->scope()->AsClassScope();
- Variable* brand = scope->brand();
- BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
- builder()->SetExpressionPosition(property);
- builder()->LoadKeyedProperty(
- object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+ if (private_name->is_static()) {
+ DCHECK_NOT_NULL(scope->class_variable());
+ // For static private methods, the only valid receiver is the class.
+ // Load the class constructor.
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(),
+ HoleCheckMode::kElided);
+ BytecodeLabel return_check;
+ builder()->CompareReference(object).JumpIfTrue(
+ ToBooleanMode::kAlreadyBoolean, &return_check);
+ BuildInvalidPropertyAccess(tmpl, property);
+ builder()->Bind(&return_check);
+ } else {
+ BuildVariableLoadForAccumulatorValue(scope->brand(),
+ HoleCheckMode::kElided);
+ builder()->SetExpressionPosition(property);
+ builder()->LoadKeyedProperty(
+ object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+ }
}
void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
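BuildPrivateBrandCheck now distinguishes two shapes: for static private members the only valid receiver is the class constructor itself, so the generated bytecode is a reference comparison, while for instance members it remains a keyed load of the brand symbol stamped onto the receiver in the constructor. A hedged C++ analogy of the two checks:

    #include <set>
    #include <stdexcept>

    struct Brand {};  // one unique brand per class evaluation

    struct Object {
      std::set<const Brand*> brands;  // stamped on `this` in the constructor
    };

    // Instance private members: the receiver must carry the class brand
    // (the LoadKeyedProperty of the brand symbol above).
    void BrandCheckInstance(const Object& receiver, const Brand* brand) {
      if (receiver.brands.count(brand) == 0)
        throw std::runtime_error("kInvalidPrivateMemberRead");
    }

    // Static private members: the only valid receiver is the class
    // constructor itself (the CompareReference above).
    void BrandCheckStatic(const Object* receiver, const Object* constructor) {
      if (receiver != constructor)
        throw std::runtime_error("kInvalidPrivateMemberRead");
    }

    int main() {
      Brand brand;
      Object klass, instance;
      instance.brands.insert(&brand);
      BrandCheckInstance(instance, &brand);  // passes
      BrandCheckStatic(&klass, &klass);      // passes
      return 0;
    }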
@@ -5113,7 +5153,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case PRIVATE_GETTER_AND_SETTER: {
object = VisitForRegisterValue(property->obj());
key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object);
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
BuildPrivateGetterAccess(object, key);
break;
}
@@ -5407,7 +5448,8 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
// If method is undefined,
// Let syncMethod be GetMethod(obj, @@iterator)
builder()
- ->GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
+ ->LoadIteratorProperty(obj,
+ feedback_index(feedback_spec()->AddLoadICSlot()))
.StoreAccumulatorInRegister(method);
// Let syncIterator be Call(syncMethod, obj)
@@ -5426,24 +5468,17 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) {
RegisterAllocationScope scope(this);
Register obj = register_allocator()->NewRegister();
- Register method = register_allocator()->NewRegister();
-
- // Let method be GetMethod(obj, @@iterator).
- builder()
- ->StoreAccumulatorInRegister(obj)
- .GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(method);
+ int load_feedback_index =
+ feedback_index(feedback_spec()->AddLoadICSlot());
+ int call_feedback_index =
+ feedback_index(feedback_spec()->AddCallICSlot());
- // Let iterator be Call(method, obj).
- builder()->CallProperty(method, RegisterList(obj),
- feedback_index(feedback_spec()->AddCallICSlot()));
+ // Let method be GetMethod(obj, @@iterator) and
+ // iterator be Call(method, obj). If Type(iterator) is not Object,
+ // throw a SymbolIteratorInvalid exception.
+ builder()->StoreAccumulatorInRegister(obj).GetIterator(
+ obj, load_feedback_index, call_feedback_index);
}
-
- // If Type(iterator) is not Object, throw a TypeError exception.
- BytecodeLabel no_type_error;
- builder()->JumpIfJSReceiver(&no_type_error);
- builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
- builder()->Bind(&no_type_error);
}
}
@@ -6102,8 +6137,9 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(2);
Runtime::FunctionId function_id =
- (IsAsyncFunction(info()->literal()->kind()) &&
- !IsAsyncGeneratorFunction(info()->literal()->kind()))
+ ((IsAsyncFunction(info()->literal()->kind()) &&
+ !IsAsyncGeneratorFunction(info()->literal()->kind())) ||
+ IsAsyncModule(info()->literal()->kind()))
? Runtime::kInlineAsyncFunctionEnter
: Runtime::kInlineCreateJSGeneratorObject;
builder()
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 134b1b463a..ecfe50ba5a 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -250,12 +250,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
void BuildThrowIfHole(Variable* variable);
- // Build jump to targets[value], where
- // start_index <= value < start_index + size.
- void BuildIndexedJump(
- Register value, size_t start_index, size_t size,
- ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references)
-
void BuildNewLocalActivationContext();
void BuildLocalActivationContextInitialization();
void BuildNewLocalBlockContext(Scope* scope);
@@ -307,10 +301,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property);
- void BuildPrivateBrandCheck(Property* property, Register object);
+ void BuildPrivateBrandCheck(Property* property, Register object,
+ MessageTemplate tmpl);
void BuildPrivateGetterAccess(Register obj, Register access_pair);
void BuildPrivateSetterAccess(Register obj, Register access_pair,
Register value);
+ void BuildPrivateMethods(ClassLiteral* expr, bool is_static,
+ Register home_object);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 60f30ee1d9..88e80b9613 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -217,6 +217,7 @@ bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
case Bytecode::kCreateBlockContext:
case Bytecode::kCreateCatchContext:
case Bytecode::kCreateRegExpLiteral:
+ case Bytecode::kGetIterator:
return true;
default:
return false;
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 6802d53c95..80f9e4d311 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -356,7 +356,8 @@ namespace interpreter {
OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Iterator protocol operations */ \
- V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx) \
+ V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \
+ OperandType::kIdx) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 167b0ee7e2..0a4bdd62f7 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -378,7 +378,7 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kRawString:
return raw_string_->string();
case Tag::kHeapNumber:
- return isolate->factory()->NewNumber(heap_number_, AllocationType::kOld);
+ return isolate->factory()->NewNumber<AllocationType::kOld>(heap_number_);
case Tag::kBigInt:
// This should never fail: the parser will never create a BigInt
// literal that cannot be allocated.
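The factory call here reflects an API change in this V8 update: NewNumber takes the allocation type as a non-type template parameter instead of a runtime argument. A generic sketch of that style of API, with all names hypothetical:

    #include <cstdio>

    enum class AllocationType { kYoung, kOld };

    // The allocation policy is a non-type template parameter, so the
    // old-space vs. young-space decision is made at compile time rather
    // than by branching on a runtime argument.
    template <AllocationType allocation>
    double* NewNumber(double value) {
      if (allocation == AllocationType::kOld)
        std::puts("allocating in old space");
      return new double(value);
    }

    int main() {
      double* n = NewNumber<AllocationType::kOld>(4096.5);
      std::printf("%f\n", *n);
      delete n;
      return 0;
    }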
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index f01821b565..a55e074b3a 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -22,8 +22,6 @@ namespace interpreter {
using compiler::CodeAssemblerState;
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
@@ -32,19 +30,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
bytecode_(bytecode),
operand_scale_(operand_scale),
TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
- VARIABLE_CONSTRUCTOR(
- bytecode_array_, MachineRepresentation::kTagged,
- Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
+ TVARIABLE_CONSTRUCTOR(
+ bytecode_array_,
+ CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
TVARIABLE_CONSTRUCTOR(
bytecode_offset_,
UncheckedCast<IntPtrT>(
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
- VARIABLE_CONSTRUCTOR(
- dispatch_table_, MachineType::PointerRepresentation(),
- Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
- VARIABLE_CONSTRUCTOR(
- accumulator_, MachineRepresentation::kTagged,
- Parameter(InterpreterDispatchDescriptor::kAccumulator)),
+ TVARIABLE_CONSTRUCTOR(
+ dispatch_table_, UncheckedCast<ExternalReference>(Parameter(
+ InterpreterDispatchDescriptor::kDispatchTable))),
+ TVARIABLE_CONSTRUCTOR(
+ accumulator_,
+ CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
@@ -129,27 +127,27 @@ void InterpreterAssembler::SaveBytecodeOffset() {
}
}
-Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
// Force a re-load of the bytecode array after every call in case the debugger
// has been activated.
if (!bytecode_array_valid_) {
- bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
+ bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
bytecode_array_valid_ = true;
}
return bytecode_array_.value();
}
-Node* InterpreterAssembler::DispatchTableRawPointer() {
+TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
- dispatch_table_.Bind(ExternalConstant(
- ExternalReference::interpreter_dispatch_table_address(isolate())));
+ dispatch_table_ = ExternalConstant(
+ ExternalReference::interpreter_dispatch_table_address(isolate()));
}
return dispatch_table_.value();
}
-Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
return accumulator_.value();
}
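Most of the interpreter-assembler churn in this diff is of the kind above: untyped compiler::Node* signatures and VARIABLE declarations become TNode<T> and TVARIABLE, pushing V8's type lattice into the C++ type system so mismatches fail at compile time. A minimal sketch of the phantom-typing idea behind TNode:

    struct Node {};  // stands in for the untyped compiler::Node

    // TNode<T> is a thin wrapper that records the V8 type of an IR node
    // in the C++ type system, so an ill-typed use fails at compile time
    // instead of at graph verification.
    template <class T>
    class TNode {
     public:
      explicit TNode(Node* node) : node_(node) {}
      Node* raw() const { return node_; }
     private:
      Node* node_;
    };

    struct IntPtrT {};
    struct Object {};

    // Typed in, typed out, as in the rewritten RegisterFrameOffset.
    TNode<IntPtrT> TimesPointerSize(TNode<IntPtrT> index) { return index; }

    int main() {
      Node n;
      TNode<IntPtrT> index(&n);
      TimesPointerSize(index);
      // TimesPointerSize(TNode<Object>(&n));  // would not compile
      return 0;
    }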
@@ -159,10 +157,11 @@ TNode<Object> InterpreterAssembler::GetAccumulator() {
return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}
-void InterpreterAssembler::SetAccumulator(Node* value) {
+// TODO(v8:6949): Remove sloppy-ness from SetAccumulator's value argument.
+void InterpreterAssembler::SetAccumulator(SloppyTNode<Object> value) {
DCHECK(Bytecodes::WritesAccumulator(bytecode_));
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
- accumulator_.Bind(value);
+ accumulator_ = value;
}
TNode<Context> InterpreterAssembler::GetContext() {
@@ -173,15 +172,14 @@ void InterpreterAssembler::SetContext(TNode<Context> value) {
StoreRegister(value, Register::current_context());
}
-Node* InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
- TNode<Uint32T> depth) {
+TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
+ TNode<Uint32T> depth) {
TVARIABLE(Context, cur_context, context);
TVARIABLE(Uint32T, cur_depth, depth);
Label context_found(this);
- Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
- Label context_search(this, 2, context_search_loop_variables);
+ Label context_search(this, {&cur_depth, &cur_context});
// Fast path if the depth is 0.
Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
@@ -206,33 +204,38 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
TVARIABLE(Context, cur_context, context);
TVARIABLE(Uint32T, cur_depth, depth);
- Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
- Label context_search(this, 2, context_search_loop_variables);
+ Label context_search(this, {&cur_depth, &cur_context});
+ Label no_extension(this);
// Loop until the depth is 0.
Goto(&context_search);
BIND(&context_search);
{
- // TODO(leszeks): We only need to do this check if the context had a sloppy
- // eval, we could pass in a context chain bitmask to figure out which
- // contexts actually need to be checked.
+ // Check if context has an extension slot
+ TNode<BoolT> has_extension =
+ LoadContextHasExtensionField(cur_context.value());
+ GotoIfNot(has_extension, &no_extension);
+ // Jump to the target if the extension slot is not a hole.
TNode<Object> extension_slot =
LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
+ Branch(TaggedNotEqual(extension_slot, TheHoleConstant()), target,
+ &no_extension);
- // Jump to the target if the extension slot is not a hole.
- GotoIf(TaggedNotEqual(extension_slot, TheHoleConstant()), target);
-
- cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
- cur_context =
- CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+ BIND(&no_extension);
+ {
+ cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
+ cur_context = CAST(
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
- GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
- &context_search);
+ GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
+ &context_search);
+ }
}
}
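The rewritten loop consults a per-context "has extension" bit before touching the extension slot, replacing the unconditional load-and-compare-against-the-hole (and resolving the removed TODO about sloppy eval). A standalone sketch of the walk, with the context chain reduced to a linked list:

    struct Context {
      Context* previous = nullptr;
      bool has_extension = false;  // the bit read from the scope info
      void* extension = nullptr;   // only inspected when the bit is set
    };

    // Walk up to `depth` contexts outward and report the first live
    // extension (the analogue of jumping to `target` above).
    void* FindContextExtension(Context* context, unsigned depth) {
      Context* cur = context;
      for (unsigned d = depth; d != 0; --d) {
        // Cheap flag test first; only then load the extension slot.
        if (cur->has_extension && cur->extension != nullptr)
          return cur->extension;
        cur = cur->previous;
      }
      return nullptr;
    }

    int main() {
      int ext = 42;
      Context outer, inner;
      inner.previous = &outer;
      outer.has_extension = true;
      outer.extension = &ext;
      return FindContextExtension(&inner, 2) == &ext ? 0 : 1;
    }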
-TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Node* reg_index) {
+TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
+ TNode<IntPtrT> reg_index) {
return Signed(WordPoisonOnSpeculation(
IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
}
@@ -241,11 +244,11 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}
-TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return Signed(TimesSystemPointerSize(index));
+TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
+ return TimesSystemPointerSize(index);
}
-TNode<Object> InterpreterAssembler::LoadRegister(Node* reg_index) {
+TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
return LoadFullTagged(GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index),
LoadSensitivity::kCritical);
@@ -281,7 +284,7 @@ std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
@@ -300,7 +303,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
return RegListNodePair(base_reg, reg_count);
}
-Node* InterpreterAssembler::LoadRegisterFromRegisterList(
+TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
// Location is already poisoned on speculation, so no need to poison here.
@@ -317,29 +320,30 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}
-void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
StoreFullTaggedNoWriteBarrier(
GetInterpretedFramePointer(),
IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}
-void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+void InterpreterAssembler::StoreRegister(TNode<Object> value,
+ TNode<IntPtrT> reg_index) {
StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index), value);
}
-void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
+void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
StoreRegister(value,
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}
-void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
- Node* value2,
+void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
@@ -347,10 +351,11 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
}
void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
- Node* value1, Node* value2, Node* value3, int operand_index) {
+ TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
+ int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index =
+ TNode<IntPtrT> first_reg_index =
BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
@@ -359,12 +364,12 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
StoreRegister(value3, third_reg_index);
}
-TNode<IntPtrT> InterpreterAssembler::NextRegister(Node* reg_index) {
+TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
// Register indexes are negative, so the next index is minus one.
return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}
-Node* InterpreterAssembler::OperandOffset(int operand_index) {
+TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
return IntPtrConstant(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
@@ -374,7 +379,7 @@ TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
- Node* operand_offset = OperandOffset(operand_index);
+ TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Uint8T>(BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), operand_offset),
needs_poisoning);
@@ -385,7 +390,7 @@ TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
- Node* operand_offset = OperandOffset(operand_index);
+ TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Int8T>(BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), operand_offset),
needs_poisoning);
@@ -429,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
TNode<IntPtrT> offset =
IntPtrConstant(relative_offset + msb_offset + i * kStep);
- TNode<WordT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
+ TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
bytes[i] =
UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
array_offset, needs_poisoning));
@@ -561,7 +566,7 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
DCHECK_EQ(OperandType::kFlag8,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -578,15 +583,16 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
+ int operand_index) {
return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
- return SmiFromInt32(Signed(BytecodeOperandUImm(operand_index)));
+TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
+ return SmiFromUint32(BytecodeOperandUImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(OperandType::kImm,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -594,15 +600,17 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
return BytecodeSignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
+ int operand_index) {
return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
+TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
return SmiFromInt32(BytecodeOperandImm(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
+ int operand_index) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -610,15 +618,15 @@ Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
- return SmiTag(BytecodeOperandIdx(operand_index));
+TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
+ return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
-Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -628,7 +636,7 @@ Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}
-Node* InterpreterAssembler::BytecodeOperandReg(
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
@@ -638,7 +646,8 @@ Node* InterpreterAssembler::BytecodeOperandReg(
BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}
-Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
+ int operand_index) {
DCHECK_EQ(OperandType::kRuntimeId,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -647,7 +656,7 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
+TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
int operand_index) {
DCHECK_EQ(OperandType::kNativeContextIndex,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -657,7 +666,8 @@ Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
BytecodeUnsignedOperand(operand_index, operand_size));
}
-Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
+TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
+ int operand_index) {
DCHECK_EQ(OperandType::kIntrinsicId,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
@@ -666,7 +676,7 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
-Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return UnsafeLoadFixedArrayElement(
@@ -674,13 +684,13 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
- Node* index) {
- return SmiUntag(LoadConstantPoolEntry(index));
+ TNode<WordT> index) {
+ return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}
-Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
+TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- Node* index =
+ TNode<UintPtrT> index =
BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
return LoadConstantPoolEntry(index);
}
@@ -688,7 +698,7 @@ Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
int operand_index) {
- return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
+ return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}
TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
@@ -713,151 +723,15 @@ void InterpreterAssembler::CallPrologue() {
void InterpreterAssembler::CallEpilogue() {
}
-void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
- Node* slot_id) {
- Comment("increment call count");
- TNode<Smi> call_count =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
- // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
- // count are used as flags. To increment the call count by 1 we hence
- // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
- TNode<Smi> new_count = SmiAdd(
- call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
- // Count is Smi, so we don't need a write barrier.
- StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
- SKIP_WRITE_BARRIER, kTaggedSize);
-}
-
-void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
- Node* feedback_vector,
- Node* slot_id) {
- Label extra_checks(this, Label::kDeferred), done(this);
-
- // Check if we have monomorphic {target} feedback already.
- TNode<MaybeObject> feedback =
- LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Comment("check if monomorphic");
- TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
- GotoIf(is_monomorphic, &done);
-
- // Check if it is a megamorphic {target}.
- Comment("check if megamorphic");
- TNode<BoolT> is_megamorphic = TaggedEqual(
- feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
- Branch(is_megamorphic, &done, &extra_checks);
-
- BIND(&extra_checks);
- {
- Label initialize(this), mark_megamorphic(this);
-
- Comment("check if weak reference");
- TNode<BoolT> is_uninitialized = TaggedEqual(
- feedback,
- HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
- GotoIf(is_uninitialized, &initialize);
- CSA_ASSERT(this, IsWeakOrCleared(feedback));
-
- // If the weak reference is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak reference is cleared");
- Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
-
- BIND(&initialize);
- {
- // Check if {target} is a JSFunction in the current native context.
- Comment("check if function in same native context");
- GotoIf(TaggedIsSmi(target), &mark_megamorphic);
- // Check if the {target} is a JSFunction or JSBoundFunction
- // in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, target);
- Label loop(this, &var_current), done_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
- TNode<Uint16T> current_instance_type = LoadInstanceType(current);
- GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
- &if_boundfunction);
- Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
- &if_function, &mark_megamorphic);
-
- BIND(&if_function);
- {
- // Check that the JSFunction {current} is in the current native
- // context.
- TNode<Context> current_context =
- CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
- LoadNativeContext(current_context);
- Branch(
- TaggedEqual(LoadNativeContext(context), current_native_context),
- &done_loop, &mark_megamorphic);
- }
-
- BIND(&if_boundfunction);
- {
- // Continue with the [[BoundTargetFunction]] of {target}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
- Goto(&loop);
- }
- }
- BIND(&done_loop);
- StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
- CAST(target));
- ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
- Goto(&done);
- }
-
- BIND(&mark_megamorphic);
- {
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
- StoreFeedbackVectorSlot(
- feedback_vector, slot_id,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Call:TransitionMegamorphic");
- Goto(&done);
- }
- }
-
- BIND(&done);
-}
-
-void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
- Node* maybe_feedback_vector,
- Node* slot_id) {
- Label feedback_done(this);
- // If feedback_vector is not valid, then nothing to do.
- GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
-
- CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
-
- // Increment the call count.
- IncrementCallCount(maybe_feedback_vector, slot_id);
-
- // Collect the callable {target} feedback.
- CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
- Goto(&feedback_done);
-
- BIND(&feedback_done);
-}
-
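IncrementCallCount and the call-feedback collection above are removed from this file but still called below (see CallJSWithSpreadAndDispatch), so they have moved elsewhere rather than disappeared; the new location is not shown in this diff. The removed comment documents a detail worth keeping: the low CallCountField::kShift bits of the count Smi are flags, so a logical increment of one is an add of 1 << kShift. A worked example with a hypothetical one-bit flag field:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical flag width; in V8 the real value comes from
    // FeedbackNexus::CallCountField::kShift.
    constexpr unsigned kShift = 1;

    uint32_t IncrementCallCount(uint32_t packed) {
      // The low kShift bits are flags, so "count += 1" is an add of
      // 1 << kShift that leaves the flag bits untouched.
      return packed + (1u << kShift);
    }

    int main() {
      uint32_t packed = 0x1;  // count == 0, flag bit set
      packed = IncrementCallCount(packed);
      std::printf("count=%u flags=%u\n",
                  packed >> kShift, packed & ((1u << kShift) - 1u));
      return 0;  // prints count=1 flags=1
    }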
void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, const RegListNodePair& args,
+ TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
- Node* args_count;
+ TNode<Word32T> args_count;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The receiver is implied, so it is not in the argument list.
args_count = args.reg_count();
@@ -879,8 +753,9 @@ void InterpreterAssembler::CallJSAndDispatch(
}
template <class... TArgs>
-void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
- Node* arg_count,
+void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
+ TNode<Context> context,
+ TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode,
TArgs... args) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
@@ -892,9 +767,9 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
- TailCallStubThenBytecodeDispatch(
- callable.descriptor(), code_target, context, function, arg_count,
- static_cast<Node*>(UndefinedConstant()), args...);
+ TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
+ context, function, arg_count,
+ UndefinedConstant(), args...);
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -906,21 +781,22 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* arg_count,
- ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
+ TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
+ ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
+ TNode<Object>);
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
- Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
- Node* maybe_feedback_vector) {
+ TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
+ TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
@@ -939,16 +815,18 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
-Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
- SloppyTNode<Object> new_target,
- const RegListNodePair& args,
- Node* slot_id, Node* feedback_vector) {
+TNode<Object> InterpreterAssembler::Construct(
+ TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
+ const RegListNodePair& args, TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_site, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
+ TVARIABLE(AllocationSite, var_site);
Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
construct(this), construct_array(this, &var_site);
- GotoIf(IsUndefined(feedback_vector), &construct);
+ GotoIf(IsUndefined(maybe_feedback_vector), &construct);
+
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -956,7 +834,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
// Check if we have monomorphic {new_target} feedback already.
TNode<MaybeObject> feedback =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Branch(IsWeakReferenceTo(feedback, new_target), &construct, &extra_checks);
+ Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
+ &extra_checks);
BIND(&extra_checks);
{
@@ -989,7 +868,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic);
GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic);
- var_site.Bind(strong_feedback);
+ var_site = CAST(strong_feedback);
Goto(&construct_array);
}
@@ -1008,14 +887,13 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
// Check if the {new_target} is a JSFunction or JSBoundFunction
// in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ TVARIABLE(HeapObject, var_current, CAST(new_target));
Label loop(this, &var_current), done_loop(this);
Goto(&loop);
BIND(&loop);
{
Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
+ TNode<HeapObject> current = var_current.value();
TNode<Uint16T> current_instance_type = LoadInstanceType(current);
GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
&if_boundfunction);
@@ -1028,7 +906,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
// context.
TNode<Context> current_context =
CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
+ TNode<NativeContext> current_native_context =
LoadNativeContext(current_context);
Branch(
TaggedEqual(LoadNativeContext(context), current_native_context),
@@ -1038,8 +916,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
BIND(&if_boundfunction);
{
// Continue with the [[BoundTargetFunction]] of {current}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
Goto(&loop);
}
}
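The loop above (and its twin in ConstructWithSpread further down) simply follows bound functions to the underlying callee. A standalone C++ sketch of that control flow, using a stand-in struct rather than the real JSFunction/JSBoundFunction types:

    // Illustrative model only: is_bound stands in for the
    // JS_BOUND_FUNCTION_TYPE instance-type check, bound_target for the
    // [[BoundTargetFunction]] field loaded in the loop body.
    struct FunctionLike {
      bool is_bound;
      FunctionLike* bound_target;
    };

    FunctionLike* UnwrapBoundChain(FunctionLike* current) {
      while (current->is_bound) current = current->bound_target;
      return current;  // the plain function whose native context is compared
    }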
@@ -1056,8 +934,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
BIND(&create_allocation_site);
{
- var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
- SmiTag(slot_id)));
+ var_site =
+ CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id);
ReportFeedbackUpdate(feedback_vector, slot_id,
"Construct:CreateAllocationSite");
Goto(&construct_array);
@@ -1097,9 +975,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
TNode<Code> code_target = HeapConstant(callable.code());
- var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, var_site.value()));
+ var_result = CallStub(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, var_site.value());
Goto(&return_result);
}
@@ -1110,9 +988,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
TNode<Code> code_target = HeapConstant(callable.code());
- var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, UndefinedConstant()));
+ var_result = CallStub(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -1120,17 +998,18 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context,
return var_result.value();
}
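Most of this function is the commit's recurring mechanical change: untyped VARIABLE/Bind pairs become TVARIABLE assignments, with CAST asserting the concrete type at the binding site. A compilable toy model of what the typed wrapper buys; none of these declarations are the real CSA API:

    // Toy model: TNode<T> carries the V8 type in the C++ type system, so a
    // mismatched binding fails to compile instead of slipping through.
    struct Node {};                       // untyped IR node (compiler::Node*)
    template <typename T>
    struct TNode {                        // typed view of the same node
      Node* node;
    };
    struct Object {};
    struct AllocationSite {};

    void Example(TNode<AllocationSite> site, TNode<Object> obj) {
      TNode<AllocationSite> var_site = site;  // ok: types line up
      // TNode<AllocationSite> bad = obj;     // rejected at compile time; the
      //                                      // old Bind() accepted any node
      (void)var_site;
      (void)obj;
    }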
-Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
- Node* new_target,
- const RegListNodePair& args,
- Node* slot_id,
- Node* feedback_vector) {
+TNode<Object> InterpreterAssembler::ConstructWithSpread(
+ TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
+ const RegListNodePair& args, TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector) {
// TODO(bmeurer): Unify this with the Construct bytecode feedback
// above once we have a way to pass the AllocationSite to the Array
// constructor _and_ spread the last argument at the same time.
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
Label extra_checks(this, Label::kDeferred), construct(this);
- GotoIf(IsUndefined(feedback_vector), &construct);
+ GotoIf(IsUndefined(maybe_feedback_vector), &construct);
+
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -1138,7 +1017,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// Check if we have monomorphic {new_target} feedback already.
TNode<MaybeObject> feedback =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
+ Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
&extra_checks);
BIND(&extra_checks);
@@ -1174,14 +1053,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
// Check if the {new_target} is a JSFunction or JSBoundFunction
// in the current native context.
- VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
+ TVARIABLE(HeapObject, var_current, CAST(new_target));
Label loop(this, &var_current), done_loop(this);
Goto(&loop);
BIND(&loop);
{
Label if_boundfunction(this), if_function(this);
- Node* current = var_current.value();
- CSA_ASSERT(this, TaggedIsNotSmi(current));
+ TNode<HeapObject> current = var_current.value();
TNode<Uint16T> current_instance_type = LoadInstanceType(current);
GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
&if_boundfunction);
@@ -1194,7 +1072,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
// context.
TNode<Context> current_context =
CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<Context> current_native_context =
+ TNode<NativeContext> current_native_context =
LoadNativeContext(current_context);
Branch(
TaggedEqual(LoadNativeContext(context), current_native_context),
@@ -1204,8 +1082,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
BIND(&if_boundfunction);
{
// Continue with the [[BoundTargetFunction]] of {current}.
- var_current.Bind(LoadObjectField(
- current, JSBoundFunction::kBoundTargetFunctionOffset));
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
Goto(&loop);
}
}
@@ -1243,7 +1121,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
UndefinedConstant());
}
-Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
+Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
const RegListNodePair& args,
int result_size) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
@@ -1252,22 +1131,22 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
TNode<Code> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
- Node* function_table = ExternalConstant(
- ExternalReference::runtime_function_table_address(isolate()));
+ TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate())));
TNode<Word32T> function_offset =
Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
TNode<WordT> function =
IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
- Node* function_entry =
- Load(MachineType::Pointer(), function,
- IntPtrConstant(offsetof(Runtime::Function, entry)));
+ TNode<RawPtrT> function_entry = Load<RawPtrT>(
+ function, IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
result_size, code_target, context, args.reg_count(),
args.base_reg_location(), function_entry);
}
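The typed rewrite of CallRuntimeN leaves the underlying arithmetic unchanged: the entry point is read out of the runtime function table at a byte offset derived from the function id. A self-contained model of that lookup (RuntimeFunctionModel is a stand-in for Runtime::Function):

    #include <cstddef>
    #include <cstdint>

    struct RuntimeFunctionModel {
      const char* name;
      void* entry;
    };

    void* LookupRuntimeEntry(const RuntimeFunctionModel* table,
                             uint32_t function_id) {
      // function_offset = function_id * sizeof(Runtime::Function), then a
      // pointer load at offsetof(..., entry); equivalent to
      // table[function_id].entry.
      const auto* fn = reinterpret_cast<const uint8_t*>(table) +
                       function_id * sizeof(RuntimeFunctionModel);
      return *reinterpret_cast<void* const*>(
          fn + offsetof(RuntimeFunctionModel, entry));
    }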
-void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
+void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
+ bool backward) {
Comment("[ UpdateInterruptBudget");
// Assert that the weight is positive (negative weights should be implemented
@@ -1289,7 +1168,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
TVARIABLE(Int32T, new_budget);
if (backward) {
// Update budget by |weight| and check if it reaches zero.
- new_budget = Signed(Int32Sub(budget_after_bytecode, weight));
+ new_budget = Int32Sub(budget_after_bytecode, weight);
TNode<BoolT> condition =
Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
Label ok(this), interrupt_check(this, Label::kDeferred);
@@ -1303,7 +1182,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
} else {
// For a forward jump, we know we only increase the interrupt budget, so
// no need to check if it's below zero.
- new_budget = Signed(Int32Add(budget_after_bytecode, weight));
+ new_budget = Int32Add(budget_after_bytecode, weight);
}
// Update budget.
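In scalar form, the budget logic in this function looks roughly like the sketch below (hedged: the real code has already folded the current bytecode's size into the budget at this point, and the interrupt itself is a runtime call):

    #include <cstdint>

    // Returns true when the Runtime::kInterrupt check must run.
    bool UpdateBudgetModel(int32_t* budget, int32_t weight, bool backward) {
      if (backward) {
        *budget -= weight;   // backward jumps consume budget...
        return *budget < 0;  // ...and may trigger the interrupt check
      }
      *budget += weight;     // forward jumps only replenish the budget,
      return false;          // so no zero check is needed
    }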
@@ -1323,7 +1202,7 @@ TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
return Advance(IntPtrConstant(delta));
}
-TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
+TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
bool backward) {
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
@@ -1334,45 +1213,51 @@ TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
return next_offset;
}
-Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
+void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
- Node* new_bytecode_offset = Advance(delta, backward);
- TNode<WordT> target_bytecode = LoadBytecode(new_bytecode_offset);
- return DispatchToBytecode(target_bytecode, new_bytecode_offset);
+ UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
+ TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
+ TNode<RawPtrT> target_bytecode =
+ UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
+ DispatchToBytecode(target_bytecode, new_bytecode_offset);
}
-Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
+void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
+ Jump(jump_offset, false);
+}
-Node* InterpreterAssembler::JumpBackward(Node* delta) {
- return Jump(delta, true);
+void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
+ Jump(jump_offset, true);
}
-void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
+void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
+ TNode<IntPtrT> jump_offset) {
Label match(this), no_match(this);
Branch(condition, &match, &no_match);
BIND(&match);
- Jump(delta);
+ Jump(jump_offset);
BIND(&no_match);
Dispatch();
}
void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
- TNode<Object> rhs, Node* delta) {
- JumpConditional(TaggedEqual(lhs, rhs), delta);
+ TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset) {
+ JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}
void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
TNode<Object> rhs,
- Node* delta) {
- JumpConditional(TaggedNotEqual(lhs, rhs), delta);
+ TNode<IntPtrT> jump_offset) {
+ JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}
-TNode<WordT> InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
- Node* bytecode =
- Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
+TNode<WordT> InterpreterAssembler::LoadBytecode(
+ TNode<IntPtrT> bytecode_offset) {
+ TNode<Uint8T> bytecode =
+ Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
return ChangeUint32ToWord(bytecode);
}
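LoadBytecode is now fully typed but still reduces to a one-byte load plus zero-extension; as a standalone model:

    #include <cstddef>
    #include <cstdint>

    uintptr_t LoadBytecodeModel(const uint8_t* bytecode_array,
                                ptrdiff_t offset) {
      // Load<Uint8T>(...) then ChangeUint32ToWord(...): one unsigned byte,
      // widened to word size for use as a dispatch-table index.
      return static_cast<uintptr_t>(bytecode_array[offset]);
    }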
@@ -1418,51 +1303,39 @@ void InterpreterAssembler::InlineStar() {
accumulator_use_ = previous_acc_use;
}
-Node* InterpreterAssembler::Dispatch() {
+void InterpreterAssembler::Dispatch() {
Comment("========= Dispatch");
DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
- Node* target_offset = Advance();
+ TNode<IntPtrT> target_offset = Advance();
TNode<WordT> target_bytecode = LoadBytecode(target_offset);
if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
target_bytecode = StarDispatchLookahead(target_bytecode);
}
- return DispatchToBytecode(target_bytecode, BytecodeOffset());
+ DispatchToBytecode(target_bytecode, BytecodeOffset());
}
-Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
- Node* new_bytecode_offset) {
+void InterpreterAssembler::DispatchToBytecode(
+ TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(target_bytecode);
}
- Node* target_code_entry =
- Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesSystemPointerSize(target_bytecode));
-
- return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
- target_bytecode);
-}
+ TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
+ DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));
-Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset,
- Node* target_bytecode) {
- // TODO(ishell): Add CSA::CodeEntryPoint(code).
- TNode<IntPtrT> handler_entry =
- IntPtrAdd(BitcastTaggedToWord(handler),
- IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
- target_bytecode);
+ DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
-Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
- Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
+void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
+ TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
// Propagate speculation poisoning.
- TNode<WordT> poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
- return TailCallBytecodeDispatch(
- InterpreterDispatchDescriptor{}, poisoned_handler_entry,
- GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer());
+ TNode<RawPtrT> poisoned_handler_entry =
+ UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
+ TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
+ poisoned_handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(),
+ DispatchTablePointer());
}
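The dispatch path itself is a table-indexed indirect call. A minimal model, with a plain function pointer standing in for the descriptor-based tail call (and omitting the accumulator, offset, and table arguments the real dispatch threads through):

    #include <cstdint>

    using Handler = void (*)();  // stand-in for a bytecode handler entry

    void DispatchModel(Handler const* dispatch_table,
                       uintptr_t target_bytecode) {
      // Load<RawPtrT>(table, TimesSystemPointerSize(bytecode)); the real
      // code poisons the loaded entry on speculation, then tail-calls it.
      Handler entry = dispatch_table[target_bytecode];
      entry();
    }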
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1474,14 +1347,14 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
// Indices 256-511 correspond to bytecodes with operand_scale == 1
// Indices 512-767 correspond to bytecodes with operand_scale == 2
DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
- Node* next_bytecode_offset = Advance(1);
+ TNode<IntPtrT> next_bytecode_offset = Advance(1);
TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(next_bytecode);
}
- Node* base_index;
+ TNode<IntPtrT> base_index;
switch (operand_scale) {
case OperandScale::kDouble:
base_index = IntPtrConstant(1 << kBitsPerByte);
@@ -1493,12 +1366,10 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
UNREACHABLE();
}
TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
- Node* target_code_entry =
- Load(MachineType::Pointer(), DispatchTableRawPointer(),
- TimesSystemPointerSize(target_index));
+ TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
+ DispatchTablePointer(), TimesSystemPointerSize(target_index));
- DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
- next_bytecode);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
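The wide-prefix index selection spelled out as scalar arithmetic (a boolean replaces the OperandScale enum for brevity):

    #include <cstdint>

    uintptr_t WideDispatchIndexModel(uint8_t next_bytecode, bool quadruple) {
      // kDouble handlers occupy table indices 256-511, kQuadruple 512-767;
      // base_index is 1 << kBitsPerByte or 2 << kBitsPerByte respectively.
      uintptr_t base_index = (quadruple ? 2u : 1u) << 8;
      return base_index + next_bytecode;
    }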
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
@@ -1527,10 +1398,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}
-Node* InterpreterAssembler::LoadOsrNestingLevel() {
- return LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOsrNestingLevelOffset,
- MachineType::Int8());
+TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
+ return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kOsrNestingLevelOffset);
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
@@ -1551,7 +1421,7 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
BIND(&ok);
}
-void InterpreterAssembler::MaybeDropFrames(Node* context) {
+void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
TNode<ExternalReference> restart_fp_address =
ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
@@ -1576,7 +1446,7 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}
-void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
+void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
TNode<ExternalReference> counters_table = ExternalConstant(
ExternalReference::interpreter_dispatch_counters(isolate()));
TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
@@ -1616,8 +1486,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
}
void InterpreterAssembler::AbortIfRegisterCountInvalid(
- Node* parameters_and_registers, Node* formal_parameter_count,
- Node* register_count) {
+ TNode<FixedArrayBase> parameters_and_registers,
+ TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
TNode<IntPtrT> array_size =
LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
@@ -1633,13 +1503,13 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(
BIND(&ok);
}
-Node* InterpreterAssembler::ExportParametersAndRegisterFile(
+TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count) {
// Store the formal parameters (without receiver) followed by the
// registers into the generator's internal parameters_and_registers field.
TNode<IntPtrT> formal_parameter_count_intptr =
- ChangeInt32ToIntPtr(formal_parameter_count);
+ Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
@@ -1649,8 +1519,8 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
}
{
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_index);
+ var_index = IntPtrConstant(0);
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
@@ -1662,16 +1532,16 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
Goto(&loop);
BIND(&loop);
{
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
- TNode<WordT> reg_index = IntPtrSub(reg_base, index);
+ TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
}
BIND(&done_loop);
@@ -1681,25 +1551,25 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over register file and write values into array.
// The mapping of register to array index must match that used in
// BytecodeGraphBuilder::VisitResumeGenerator.
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_index);
+ var_index = IntPtrConstant(0);
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
BIND(&loop);
{
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
- TNode<WordT> reg_index =
+ TNode<IntPtrT> reg_index =
IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
TNode<Object> value = LoadRegister(reg_index);
- TNode<WordT> array_index =
+ TNode<IntPtrT> array_index =
IntPtrAdd(formal_parameter_count_intptr, index);
StoreFixedArrayElement(array, array_index, value);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
}
BIND(&done_loop);
@@ -1708,11 +1578,11 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
return array;
}
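The layout the two loops build, sketched with plain vectors (the frame-offset register access, where register i lives at reg_base - i, is deliberately elided):

    #include <vector>

    // array = [param_0 .. param_{P-1}, reg_0 .. reg_{R-1}], the index
    // mapping BytecodeGraphBuilder::VisitResumeGenerator expects.
    std::vector<int> ExportModel(const std::vector<int>& params,
                                 const std::vector<int>& regs) {
      std::vector<int> array(params);
      array.insert(array.end(), regs.begin(), regs.end());
      return array;
    }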
-Node* InterpreterAssembler::ImportRegisterFile(
+TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count) {
TNode<IntPtrT> formal_parameter_count_intptr =
- ChangeInt32ToIntPtr(formal_parameter_count);
+ Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
@@ -1758,8 +1628,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
TNode<Object> object = GetAccumulator();
TNode<Context> context = GetContext();
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Smi, var_type_feedback);
+ TVARIABLE(Numeric, var_result);
Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
if_objectisother(this, Label::kDeferred);
@@ -1768,15 +1638,15 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
BIND(&if_objectissmi);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
Goto(&if_done);
}
BIND(&if_objectisheapnumber);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
Goto(&if_done);
}
@@ -1789,23 +1659,23 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
Label not_bigint(this);
GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
{
- var_result.Bind(object);
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
+ var_result = CAST(object);
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
Goto(&if_done);
}
BIND(&not_bigint);
}
// Convert {object} by calling out to the appropriate builtin.
- var_result.Bind(CallBuiltin(builtin, context, object));
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ var_result = CAST(CallBuiltin(builtin, context, object));
+ var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
Goto(&if_done);
}
BIND(&if_done);
// Record the type feedback collected for {object}.
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
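The feedback that ToNumberOrNumeric records, flattened into one plain function; the enumerator names mirror BinaryOperationFeedback but the type itself is illustrative:

    enum class FeedbackModel { kSignedSmall, kNumber, kBigInt, kAny };

    FeedbackModel Classify(bool is_smi, bool is_heap_number, bool is_bigint,
                           bool numeric_mode) {
      if (is_smi) return FeedbackModel::kSignedSmall;
      if (is_heap_number) return FeedbackModel::kNumber;
      // BigInt feedback is only recorded on the ToNumeric path.
      if (numeric_mode && is_bigint) return FeedbackModel::kBigInt;
      return FeedbackModel::kAny;  // the object needed the builtin call
    }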
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 33fa987595..4a1882b82c 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -25,64 +25,62 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the 32-bit unsigned count immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::TNode<Uint32T> BytecodeOperandCount(int operand_index);
+ TNode<Uint32T> BytecodeOperandCount(int operand_index);
// Returns the 32-bit unsigned flag for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandFlag(int operand_index);
+ TNode<Uint32T> BytecodeOperandFlag(int operand_index);
// Returns the 32-bit zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIdxInt32(int operand_index);
+ TNode<Uint32T> BytecodeOperandIdxInt32(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIdx(int operand_index);
+ TNode<UintPtrT> BytecodeOperandIdx(int operand_index);
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandIdxSmi(int operand_index);
+ TNode<Smi> BytecodeOperandIdxSmi(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::TNode<Uint32T> BytecodeOperandUImm(int operand_index);
+ TNode<Uint32T> BytecodeOperandUImm(int operand_index);
// Returns the word-size unsigned immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandUImmWord(int operand_index);
+ TNode<UintPtrT> BytecodeOperandUImmWord(int operand_index);
// Returns the unsigned smi immediate for bytecode operand |operand_index| in
// the current bytecode.
- compiler::Node* BytecodeOperandUImmSmi(int operand_index);
+ TNode<Smi> BytecodeOperandUImmSmi(int operand_index);
// Returns the 32-bit signed immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandImm(int operand_index);
+ TNode<Int32T> BytecodeOperandImm(int operand_index);
// Returns the word-size signed immediate for bytecode operand |operand_index|
// in the current bytecode.
- compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
+ TNode<IntPtrT> BytecodeOperandImmIntPtr(int operand_index);
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
- compiler::Node* BytecodeOperandImmSmi(int operand_index);
+ TNode<Smi> BytecodeOperandImmSmi(int operand_index);
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandRuntimeId(int operand_index);
- // Returns the 32-bit unsigned native context index immediate for bytecode
+ TNode<Uint32T> BytecodeOperandRuntimeId(int operand_index);
+ // Returns the word zero-extended native context index immediate for bytecode
// operand |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
+ TNode<UintPtrT> BytecodeOperandNativeContextIndex(int operand_index);
// Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
-
+ TNode<Uint32T> BytecodeOperandIntrinsicId(int operand_index);
// Accumulator.
- compiler::TNode<Object> GetAccumulator();
- void SetAccumulator(compiler::Node* value);
+ TNode<Object> GetAccumulator();
+ void SetAccumulator(SloppyTNode<Object> value);
// Context.
- compiler::TNode<Context> GetContext();
- void SetContext(compiler::TNode<Context> value);
+ TNode<Context> GetContext();
+ void SetContext(TNode<Context> value);
// Context at |depth| in the context chain starting at |context|.
- compiler::Node* GetContextAtDepth(compiler::TNode<Context> context,
- compiler::TNode<Uint32T> depth);
+ TNode<Context> GetContextAtDepth(TNode<Context> context,
+ TNode<Uint32T> depth);
// Goto the given |target| if the context chain starting at |context| has any
// extensions up to the given |depth|.
- void GotoIfHasContextExtensionUpToDepth(compiler::TNode<Context> context,
- compiler::TNode<Uint32T> depth,
- Label* target);
+ void GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
+ TNode<Uint32T> depth, Label* target);
// A RegListNodePair provides an abstraction over lists of registers.
class RegListNodePair {
@@ -90,14 +88,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
RegListNodePair(TNode<IntPtrT> base_reg_location, TNode<Word32T> reg_count)
: base_reg_location_(base_reg_location), reg_count_(reg_count) {}
- compiler::TNode<Word32T> reg_count() const { return reg_count_; }
- compiler::TNode<IntPtrT> base_reg_location() const {
- return base_reg_location_;
- }
+ TNode<Word32T> reg_count() const { return reg_count_; }
+ TNode<IntPtrT> base_reg_location() const { return base_reg_location_; }
private:
- compiler::TNode<IntPtrT> base_reg_location_;
- compiler::TNode<Word32T> reg_count_;
+ TNode<IntPtrT> base_reg_location_;
+ TNode<Word32T> reg_count_;
};
// Backup/restore register file to/from a fixed array of the correct length.
@@ -105,72 +101,53 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// - Suspend copies arguments and registers to the generator.
// - Resume copies only the registers from the generator, the arguments
// are copied by the ResumeGenerator trampoline.
- compiler::Node* ExportParametersAndRegisterFile(
+ TNode<FixedArray> ExportParametersAndRegisterFile(
TNode<FixedArray> array, const RegListNodePair& registers,
TNode<Int32T> formal_parameter_count);
- compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
- const RegListNodePair& registers,
- TNode<Int32T> formal_parameter_count);
+ TNode<FixedArray> ImportRegisterFile(TNode<FixedArray> array,
+ const RegListNodePair& registers,
+ TNode<Int32T> formal_parameter_count);
// Loads from and stores to the interpreter register file.
- compiler::TNode<Object> LoadRegister(Register reg);
- compiler::TNode<IntPtrT> LoadAndUntagRegister(Register reg);
- compiler::TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
- std::pair<compiler::TNode<Object>, compiler::TNode<Object>>
- LoadRegisterPairAtOperandIndex(int operand_index);
- void StoreRegister(compiler::Node* value, Register reg);
- void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
- void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
- compiler::Node* value2,
- int operand_index);
- void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
- compiler::Node* value2,
- compiler::Node* value3,
+ TNode<Object> LoadRegister(Register reg);
+ TNode<IntPtrT> LoadAndUntagRegister(Register reg);
+ TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
+ std::pair<TNode<Object>, TNode<Object>> LoadRegisterPairAtOperandIndex(
+ int operand_index);
+ void StoreRegister(TNode<Object> value, Register reg);
+ void StoreRegisterAtOperandIndex(TNode<Object> value, int operand_index);
+ void StoreRegisterPairAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2, int operand_index);
+ void StoreRegisterTripleAtOperandIndex(TNode<Object> value1,
+ TNode<Object> value2,
+ TNode<Object> value3,
int operand_index);
RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
- Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
- int index);
+ TNode<Object> LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
+ int index);
TNode<IntPtrT> RegisterLocationInRegisterList(const RegListNodePair& reg_list,
int index);
// Load constant at the index specified in operand |operand_index| from the
// constant pool.
- compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
+ TNode<Object> LoadConstantPoolEntryAtOperandIndex(int operand_index);
// Load and untag constant at the index specified in operand |operand_index|
// from the constant pool.
TNode<IntPtrT> LoadAndUntagConstantPoolEntryAtOperandIndex(int operand_index);
// Load constant at |index| in the constant pool.
- compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+ TNode<Object> LoadConstantPoolEntry(TNode<WordT> index);
// Load and untag constant at |index| in the constant pool.
- TNode<IntPtrT> LoadAndUntagConstantPoolEntry(compiler::Node* index);
+ TNode<IntPtrT> LoadAndUntagConstantPoolEntry(TNode<WordT> index);
// Load the FeedbackVector for the current function. The returned node could be
// undefined.
- compiler::TNode<HeapObject> LoadFeedbackVector();
-
- // Increment the call count for a CALL_IC or construct call.
- // The call count is located at feedback_vector[slot_id + 1].
- void IncrementCallCount(compiler::Node* feedback_vector,
- compiler::Node* slot_id);
-
- // Collect the callable |target| feedback for either a CALL_IC or
- // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
- void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* feedback_vector,
- compiler::Node* slot_id);
-
- // Collect CALL_IC feedback for |target| function in the
- // |feedback_vector| at |slot_id|, and the call counts in
- // the |feedback_vector| at |slot_id+1|.
- void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
- compiler::Node* maybe_feedback_vector,
- compiler::Node* slot_id);
+ TNode<HeapObject> LoadFeedbackVector();
// Call JSFunction or Callable |function| with |args| arguments, possibly
// including the receiver depending on |receiver_mode|. After the call returns,
// directly dispatches to the next bytecode.
- void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
+ void CallJSAndDispatch(TNode<Object> function, TNode<Context> context,
const RegListNodePair& args,
ConvertReceiverMode receiver_mode);
@@ -179,93 +156,89 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// depending on |receiver_mode|. After the call returns, directly dispatches to
// the next bytecode.
template <class... TArgs>
- void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
+ void CallJSAndDispatch(TNode<Object> function, TNode<Context> context,
+ TNode<Word32T> arg_count,
ConvertReceiverMode receiver_mode, TArgs... args);
// Call JSFunction or Callable |function| with |args|
// arguments (not including receiver), and the final argument being spread.
// After the call returns, directly dispatches to the next bytecode.
- void CallJSWithSpreadAndDispatch(compiler::Node* function,
- compiler::Node* context,
+ void CallJSWithSpreadAndDispatch(TNode<Object> function,
+ TNode<Context> context,
const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call constructor |target| with |args| arguments (not including receiver).
// The |new_target| is the same as the |target| for the new keyword, but
// differs for the super keyword.
- compiler::Node* Construct(compiler::SloppyTNode<Object> target,
- compiler::Node* context,
- compiler::SloppyTNode<Object> new_target,
- const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<Object> Construct(TNode<Object> target, TNode<Context> context,
+ TNode<Object> new_target, const RegListNodePair& args,
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call constructor |target| with |args| arguments (not including
// receiver). The last argument is always a spread. The |new_target| is the
// same as the |target| for the new keyword, but differs for the super
// keyword.
- compiler::Node* ConstructWithSpread(compiler::Node* target,
- compiler::Node* context,
- compiler::Node* new_target,
- const RegListNodePair& args,
- compiler::Node* slot_id,
- compiler::Node* feedback_vector);
+ TNode<Object> ConstructWithSpread(TNode<Object> target,
+ TNode<Context> context,
+ TNode<Object> new_target,
+ const RegListNodePair& args,
+ TNode<UintPtrT> slot_id,
+ TNode<HeapObject> maybe_feedback_vector);
// Call runtime function with |args| arguments, which will return |return_size|
// values.
- compiler::Node* CallRuntimeN(compiler::Node* function_id,
- compiler::Node* context,
+ compiler::Node* CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
const RegListNodePair& args,
int return_size = 1);
// Jump forward relative to the current bytecode by the |jump_offset|.
- compiler::Node* Jump(compiler::Node* jump_offset);
+ void Jump(TNode<IntPtrT> jump_offset);
// Jump backward relative to the current bytecode by the |jump_offset|.
- compiler::Node* JumpBackward(compiler::Node* jump_offset);
+ void JumpBackward(TNode<IntPtrT> jump_offset);
// Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
- void JumpIfTaggedEqual(compiler::TNode<Object> lhs,
- compiler::TNode<Object> rhs,
- compiler::Node* jump_offset);
+ void JumpIfTaggedEqual(TNode<Object> lhs, TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset);
// Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
- void JumpIfTaggedNotEqual(compiler::TNode<Object> lhs,
- compiler::TNode<Object> rhs,
- compiler::Node* jump_offset);
+ void JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs,
+ TNode<IntPtrT> jump_offset);
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
// Returns the OSR nesting level from the bytecode header.
- compiler::Node* LoadOsrNestingLevel();
+ TNode<Int8T> LoadOsrNestingLevel();
// Dispatch to the bytecode.
- compiler::Node* Dispatch();
+ void Dispatch();
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
// Dispatch to |target_bytecode| at |new_bytecode_offset|.
// |target_bytecode| should be equivalent to loading from the offset.
- compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
- compiler::Node* new_bytecode_offset);
+ void DispatchToBytecode(TNode<WordT> target_bytecode,
+ TNode<IntPtrT> new_bytecode_offset);
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
- void AbortIfWordNotEqual(compiler::TNode<WordT> lhs,
- compiler::TNode<WordT> rhs,
+ void AbortIfWordNotEqual(TNode<WordT> lhs, TNode<WordT> rhs,
AbortReason abort_reason);
// Abort if |register_count| is invalid for given register file array.
- void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
- compiler::Node* formal_parameter_count,
- compiler::Node* register_count);
+ void AbortIfRegisterCountInvalid(
+ TNode<FixedArrayBase> parameters_and_registers,
+ TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count);
// Dispatch to frame dropper trampoline if necessary.
- void MaybeDropFrames(compiler::Node* context);
+ void MaybeDropFrames(TNode<Context> context);
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset();
@@ -277,27 +250,27 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
private:
- // Returns a tagged pointer to the current function's BytecodeArray object.
- compiler::Node* BytecodeArrayTaggedPointer();
+ // Returns a pointer to the current function's BytecodeArray object.
+ TNode<BytecodeArray> BytecodeArrayTaggedPointer();
- // Returns a raw pointer to first entry in the interpreter dispatch table.
- compiler::Node* DispatchTableRawPointer();
+ // Returns a pointer to first entry in the interpreter dispatch table.
+ TNode<ExternalReference> DispatchTablePointer();
// Returns the accumulator value without checking whether the bytecode
// uses it. This is intended to be used only in dispatch and in
// tracing as these need to bypass accumulator use validity checks.
- compiler::Node* GetAccumulatorUnchecked();
+ TNode<Object> GetAccumulatorUnchecked();
// Returns the frame pointer for the interpreted frame of the function being
// interpreted.
TNode<RawPtrT> GetInterpretedFramePointer();
// Operations on registers.
- compiler::TNode<IntPtrT> RegisterLocation(Register reg);
- compiler::TNode<IntPtrT> RegisterLocation(compiler::Node* reg_index);
- compiler::TNode<IntPtrT> NextRegister(compiler::Node* reg_index);
- compiler::TNode<Object> LoadRegister(Node* reg_index);
- void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
+ TNode<IntPtrT> RegisterLocation(Register reg);
+ TNode<IntPtrT> RegisterLocation(TNode<IntPtrT> reg_index);
+ TNode<IntPtrT> NextRegister(TNode<IntPtrT> reg_index);
+ TNode<Object> LoadRegister(TNode<IntPtrT> reg_index);
+ void StoreRegister(TNode<Object> value, TNode<IntPtrT> reg_index);
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
@@ -305,7 +278,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void CallEpilogue();
// Increment the dispatch counter for the (current, next) bytecode pair.
- void TraceBytecodeDispatch(compiler::Node* target_index);
+ void TraceBytecodeDispatch(TNode<WordT> target_bytecode);
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
@@ -313,74 +286,74 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the bytecode array's interrupt budget by a 32-bit signed |weight|
// and calls Runtime::kInterrupt if the counter reaches zero. If |backward|, then
// the interrupt budget is decremented, otherwise it is incremented.
- void UpdateInterruptBudget(compiler::Node* weight, bool backward);
+ void UpdateInterruptBudget(TNode<Int32T> weight, bool backward);
// Returns the offset of register |index| relative to RegisterFilePointer().
- compiler::TNode<IntPtrT> RegisterFrameOffset(compiler::Node* index);
+ TNode<IntPtrT> RegisterFrameOffset(TNode<IntPtrT> index);
// Returns the offset of an operand relative to the current bytecode offset.
- compiler::Node* OperandOffset(int operand_index);
+ TNode<IntPtrT> OperandOffset(int operand_index);
// Returns a value built from a sequence of bytes in the bytecode
// array starting at |relative_offset| from the current bytecode.
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- compiler::TNode<Word32T> BytecodeOperandReadUnaligned(
+ TNode<Word32T> BytecodeOperandReadUnaligned(
int relative_offset, MachineType result_type,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the zero- or sign-extended word32 value of the operand.
- compiler::TNode<Uint8T> BytecodeOperandUnsignedByte(
+ TNode<Uint8T> BytecodeOperandUnsignedByte(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int8T> BytecodeOperandSignedByte(
+ TNode<Int8T> BytecodeOperandSignedByte(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint16T> BytecodeOperandUnsignedShort(
+ TNode<Uint16T> BytecodeOperandUnsignedShort(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int16T> BytecodeOperandSignedShort(
+ TNode<Int16T> BytecodeOperandSignedShort(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint32T> BytecodeOperandUnsignedQuad(
+ TNode<Uint32T> BytecodeOperandUnsignedQuad(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Int32T> BytecodeOperandSignedQuad(
+ TNode<Int32T> BytecodeOperandSignedQuad(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the zero- or sign-extended word32 value of the operand of the
// given size.
- compiler::TNode<Int32T> BytecodeSignedOperand(
+ TNode<Int32T> BytecodeSignedOperand(
int operand_index, OperandSize operand_size,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- compiler::TNode<Uint32T> BytecodeUnsignedOperand(
+ TNode<Uint32T> BytecodeUnsignedOperand(
int operand_index, OperandSize operand_size,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode. Value is not poisoned on
// speculation since the value loaded from the register is poisoned instead.
- compiler::Node* BytecodeOperandReg(
+ TNode<IntPtrT> BytecodeOperandReg(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode for use when loading a constant pool entry.
- compiler::Node* BytecodeOperandConstantPoolIdx(
+ TNode<UintPtrT> BytecodeOperandConstantPoolIdx(
int operand_index,
LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
// offset). Helper function for Jump and JumpBackward.
- compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
+ void Jump(TNode<IntPtrT> jump_offset, bool backward);
// Jump forward relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfTaggedEqual and
// JumpIfTaggedNotEqual.
- void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+ void JumpConditional(TNode<BoolT> condition, TNode<IntPtrT> jump_offset);
// Save the bytecode offset to the interpreter frame.
void SaveBytecodeOffset();
@@ -394,29 +367,22 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates and returns BytecodeOffset() advanced by |delta| bytecodes.
// Traces the exit of the current bytecode.
TNode<IntPtrT> Advance(int delta);
- TNode<IntPtrT> Advance(SloppyTNode<IntPtrT> delta, bool backward = false);
+ TNode<IntPtrT> Advance(TNode<IntPtrT> delta, bool backward = false);
// Load the bytecode at |bytecode_offset|.
- compiler::TNode<WordT> LoadBytecode(compiler::Node* bytecode_offset);
+ TNode<WordT> LoadBytecode(TNode<IntPtrT> bytecode_offset);
// Look ahead for Star and inline it in a branch. Returns a new target
// bytecode node for dispatch.
- compiler::TNode<WordT> StarDispatchLookahead(
- compiler::TNode<WordT> target_bytecode);
+ TNode<WordT> StarDispatchLookahead(TNode<WordT> target_bytecode);
// Build code for Star at the current BytecodeOffset() and Advance() to the
// next dispatch offset.
void InlineStar();
- // Dispatch to the bytecode handler with code offset |handler|.
- compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset,
- compiler::Node* target_bytecode);
-
// Dispatch to the bytecode handler with code entry point |handler_entry|.
- compiler::Node* DispatchToBytecodeHandlerEntry(
- compiler::Node* handler_entry, compiler::Node* bytecode_offset,
- compiler::Node* target_bytecode);
+ void DispatchToBytecodeHandlerEntry(TNode<RawPtrT> handler_entry,
+ TNode<IntPtrT> bytecode_offset);
int CurrentBytecodeSize() const;
@@ -424,11 +390,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
Bytecode bytecode_;
OperandScale operand_scale_;
- TVariable<RawPtrT> interpreted_frame_pointer_;
- CodeStubAssembler::Variable bytecode_array_;
- TVariable<IntPtrT> bytecode_offset_;
- CodeStubAssembler::Variable dispatch_table_;
- CodeStubAssembler::Variable accumulator_;
+ CodeStubAssembler::TVariable<RawPtrT> interpreted_frame_pointer_;
+ CodeStubAssembler::TVariable<BytecodeArray> bytecode_array_;
+ CodeStubAssembler::TVariable<IntPtrT> bytecode_offset_;
+ CodeStubAssembler::TVariable<ExternalReference> dispatch_table_;
+ CodeStubAssembler::TVariable<Object> accumulator_;
AccumulatorUse accumulator_use_;
bool made_call_;
bool reloaded_frame_ptr_;
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index e8569ecd55..5f686f86b8 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -35,7 +35,6 @@ namespace {
using compiler::Node;
using Label = CodeStubAssembler::Label;
-using Variable = CodeStubAssembler::Variable;
#define IGNITION_HANDLER(Name, BaseAssembler) \
class Name##Assembler : public BaseAssembler { \
@@ -71,7 +70,7 @@ IGNITION_HANDLER(LdaZero, InterpreterAssembler) {
//
// Load an integer literal into the accumulator as a Smi.
IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
- Node* smi_int = BytecodeOperandImmSmi(0);
+ TNode<Smi> smi_int = BytecodeOperandImmSmi(0);
SetAccumulator(smi_int);
Dispatch();
}
@@ -80,7 +79,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
//
// Load constant literal at |idx| in the constant pool into the accumulator.
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) {
- Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> constant = LoadConstantPoolEntryAtOperandIndex(0);
SetAccumulator(constant);
Dispatch();
}
@@ -161,7 +160,6 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
void LdaGlobal(int slot_operand_index, int name_operand_index,
TypeofMode typeof_mode) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
ExitPoint exit_point(this, [=](Node* result) {
@@ -169,17 +167,25 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Dispatch();
});
+ LazyNode<Smi> lazy_smi_slot = [=] {
+ return SmiTag(Signed(BytecodeOperandIdx(slot_operand_index)));
+ };
+
+ LazyNode<UintPtrT> lazy_slot = [=] {
+ return BytecodeOperandIdx(slot_operand_index);
+ };
+
LazyNode<Context> lazy_context = [=] { return GetContext(); };
LazyNode<Name> lazy_name = [=] {
- Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index);
- return CAST(name);
+ TNode<Name> name =
+ CAST(LoadConstantPoolEntryAtOperandIndex(name_operand_index));
+ return name;
};
- ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS;
- accessor_asm.LoadGlobalIC(maybe_feedback_vector, feedback_slot,
- lazy_context, lazy_name, typeof_mode, &exit_point,
- slot_mode);
+ accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_smi_slot, lazy_slot,
+ lazy_context, lazy_name, typeof_mode,
+ &exit_point);
}
};
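The switch to LazyNode thunks defers operand decoding until a path actually needs it. A toy model of the pattern, assuming (as in the CSA) that a lazy node is essentially std::function<TNode<T>()>:

    #include <functional>

    template <typename T>
    using Lazy = std::function<T()>;

    int LoadGlobalModel(bool fast_path_hit, Lazy<int> lazy_slot) {
      if (fast_path_hit) return 0;  // fast path never materializes the slot
      return lazy_slot();           // decoding happens only on the slow path
    }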
@@ -213,9 +219,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<Context> context = GetContext();
// Store the global via the StoreGlobalIC.
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
@@ -240,9 +246,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -254,9 +260,9 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -266,7 +272,7 @@ IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
@@ -277,7 +283,7 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) {
//
// Load the object in |slot_index| of the current context into the accumulator.
IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
@@ -291,9 +297,9 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0));
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
StoreContextElement(slot_context, slot_index, value);
Dispatch();
}
@@ -304,7 +310,7 @@ IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
// context.
IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0));
TNode<Context> slot_context = GetContext();
StoreContextElement(slot_context, slot_index, value);
Dispatch();
@@ -315,7 +321,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
TNode<Object> result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
SetAccumulator(result);
@@ -327,7 +333,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) {
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
TNode<Object> result =
CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name);
@@ -344,7 +350,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
void LookupContextSlot(Runtime::FunctionId function_id) {
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1));
TNode<Uint32T> depth = BytecodeOperandUImm(2);
Label slowpath(this, Label::kDeferred);
@@ -354,7 +360,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Fast path does a normal load context.
{
- Node* slot_context = GetContextAtDepth(context, depth);
+ TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -363,7 +369,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Slow path when we have to call out to the runtime.
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -419,7 +425,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
// Slow path when we have to call out to the runtime
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -450,10 +456,10 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
// pool entry |name_index|.
IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
- Node* bytecode_flags = BytecodeOperandFlag(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1);
TNode<Context> context = GetContext();
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
Label sloppy(this), strict(this), end(this);
DCHECK_EQ(0, LanguageMode::kSloppy);
@@ -467,8 +473,8 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
{
CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
bytecode_flags));
- var_result.Bind(
- CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value));
+ var_result =
+ CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value);
Goto(&end);
}
@@ -481,15 +487,15 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
BIND(&hoisting);
{
- var_result.Bind(CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting,
- context, name, value));
+ var_result = CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting,
+ context, name, value);
Goto(&end);
}
BIND(&ordinary);
{
- var_result.Bind(
- CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value));
+ var_result =
+ CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value);
Goto(&end);
}
}
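The flag dispatch above reduces to a three-way choice between runtime functions; sketched with plain booleans standing in for StoreLookupSlotFlags::LanguageModeBit and LookupHoistingModeBit:

    enum class StoreRuntimeModel { kStrict, kSloppyHoisting, kSloppy };

    StoreRuntimeModel Select(bool is_strict, bool hoisting) {
      // Strict mode asserts the hoisting bit is clear (see the CSA_ASSERT).
      if (is_strict) return StoreRuntimeModel::kStrict;
      return hoisting ? StoreRuntimeModel::kSloppyHoisting
                      : StoreRuntimeModel::kSloppy;
    }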
@@ -507,24 +513,24 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(2);
- TNode<Smi> smi_slot = SmiTag(feedback_slot);
+ TNode<UintPtrT> feedback_slot = BytecodeOperandIdx(2);
// Load receiver.
TNode<Object> recv = LoadRegisterAtOperandIndex(0);
// Load the name and context lazily.
- LazyNode<Name> name = [=] {
+ LazyNode<Smi> lazy_smi_slot = [=] { return SmiTag(Signed(feedback_slot)); };
+ LazyNode<Name> lazy_name = [=] {
return CAST(LoadConstantPoolEntryAtOperandIndex(1));
};
- LazyNode<Context> context = [=] { return GetContext(); };
+ LazyNode<Context> lazy_context = [=] { return GetContext(); };
Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Object, var_result);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot,
- feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(
+ lazy_context, recv, lazy_name, lazy_smi_slot, feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
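
The LdaNamedProperty change threads lazy_smi_slot, lazy_name, and lazy_context into the IC so the fast path never materializes them. A standalone model of that deferral, assuming only that LazyNode<T> behaves like std::function<T()>:

    #include <functional>
    #include <iostream>

    template <typename T>
    using LazyNode = std::function<T()>;

    void LoadIC(bool fast_path, const LazyNode<int>& lazy_slot) {
      if (fast_path) {
        std::cout << "fast path: slot never materialized\n";
      } else {
        std::cout << "slow path: slot = " << lazy_slot() << "\n";
      }
    }

    int main() {
      int raw_slot = 7;
      // The lambda body (the SmiTag in the real handler) runs only on demand.
      LazyNode<int> lazy_slot = [=] { return raw_slot * 2; };
      LoadIC(/*fast_path=*/true, lazy_slot);
      LoadIC(/*fast_path=*/false, lazy_slot);
    }
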
@@ -540,7 +546,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Calls the GetProperty builtin for <object> and the key in the accumulator.
IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
TNode<Object> result =
CallBuiltin(Builtins::kGetProperty, context, object, name);
@@ -555,14 +561,14 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
+ smi_slot, feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -577,16 +583,16 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
TNode<Code> code_target = HeapConstant(ic.code());
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallStub(ic.descriptor(), code_target, context, object,
- name, value, smi_slot, maybe_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallStub(ic.descriptor(), code_target, context, object, name,
+ value, smi_slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -624,7 +630,7 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
IGNITION_HANDLER(StaNamedPropertyNoFeedback,
InterpreterStoreNamedPropertyAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
TNode<Context> context = GetContext();
@@ -642,14 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, maybe_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
+ value, smi_slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -667,14 +673,14 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<Object> array = LoadRegisterAtOperandIndex(0);
TNode<Object> index = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
+ index, value, smi_slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -696,8 +702,9 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
- TNode<Smi> vector_index = SmiTag(BytecodeOperandIdx(3));
+ TNode<Smi> flags =
+ SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
+ TNode<Smi> vector_index = BytecodeOperandIdxSmi(3);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -708,7 +715,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
}
IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
- Node* position = BytecodeOperandImmSmi(0);
+ TNode<Smi> position = BytecodeOperandImmSmi(0);
TNode<Object> value = GetAccumulator();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
@@ -725,10 +732,10 @@ IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) {
// identified by <cell_index>. <depth> is the depth of the current context
// relative to the module context.
IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
- Node* cell_index = BytecodeOperandImmIntPtr(0);
+ TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0);
TNode<Uint32T> depth = BytecodeOperandUImm(1);
- Node* module_context = GetContextAtDepth(GetContext(), depth);
+ TNode<Context> module_context = GetContextAtDepth(GetContext(), depth);
TNode<SourceTextModule> module =
CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -741,7 +748,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
- TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+ TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
TNode<Cell> cell =
CAST(LoadFixedArrayElement(regular_exports, export_index));
SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
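
The export and import branches decode SourceTextModule's cell-index convention: positive indices address regular_exports, negative ones regular_imports, both off by one. A standalone sketch of the mapping the IntPtrSub lines compute (hypothetical helper, plain ints in place of IntPtrT):

    #include <cassert>
    #include <cstdio>

    enum class CellArray { kExports, kImports };
    struct CellRef { CellArray array; int index; };

    CellRef DecodeCellIndex(int cell_index) {
      assert(cell_index != 0);
      if (cell_index > 0)
        return {CellArray::kExports, cell_index - 1};  // exports[idx - 1]
      return {CellArray::kImports, -cell_index - 1};   // imports[-idx - 1]
    }

    int main() {
      CellRef e = DecodeCellIndex(3);   // regular_exports[2]
      CellRef i = DecodeCellIndex(-1);  // regular_imports[0]
      std::printf("export slot %d, import slot %d\n", e.index, i.index);
    }
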
@@ -753,7 +760,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_imports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularImportsOffset);
// The actual array index is (-cell_index - 1).
- TNode<WordT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
+ TNode<IntPtrT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
TNode<Cell> cell =
CAST(LoadFixedArrayElement(regular_imports, import_index));
SetAccumulator(LoadObjectField(cell, Cell::kValueOffset));
@@ -770,10 +777,10 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
// <depth> is the depth of the current context relative to the module context.
IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* cell_index = BytecodeOperandImmIntPtr(0);
+ TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0);
TNode<Uint32T> depth = BytecodeOperandUImm(1);
- Node* module_context = GetContextAtDepth(GetContext(), depth);
+ TNode<Context> module_context = GetContextAtDepth(GetContext(), depth);
TNode<SourceTextModule> module =
CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -786,7 +793,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>(
module, SourceTextModule::kRegularExportsOffset);
// The actual array index is (cell_index - 1).
- TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
+ TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1));
TNode<Object> cell = LoadFixedArrayElement(regular_exports, export_index);
StoreObjectField(cell, Cell::kValueOffset, value);
Goto(&end);
@@ -830,34 +837,35 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- using BinaryOpGenerator =
- Node* (BinaryOpAssembler::*)(Node* context, Node* left, Node* right,
- Node* slot, Node* vector, bool lhs_is_smi);
+ using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool rhs_known_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
TNode<Object> rhs = GetAccumulator();
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, false);
+ TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ maybe_feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
void BinaryOpSmiWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = GetAccumulator();
- Node* rhs = BytecodeOperandImmSmi(0);
+ TNode<Smi> rhs = BytecodeOperandImmSmi(0);
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, true);
+ TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ maybe_feedback_vector, true);
SetAccumulator(result);
Dispatch();
}
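
BinaryOpGenerator is now a fully typed pointer-to-member, so a generator whose parameters or return type drift out of sync can no longer be plugged in silently. A standalone illustration with plain ints standing in for the TNode types:

    #include <iostream>

    class BinaryOpAssembler {
     public:
      int Generate_AddWithFeedback(int left, int right, bool rhs_known_smi) {
        (void)rhs_known_smi;  // the real generator specializes on this flag
        return left + right;
      }
    };

    // was: Node* (BinaryOpAssembler::*)(Node*, Node*, ...) — untyped
    using BinaryOpGenerator = int (BinaryOpAssembler::*)(int, int, bool);

    int BinaryOpWithFeedback(BinaryOpGenerator generator, int lhs, int rhs) {
      BinaryOpAssembler binop_asm;
      return (binop_asm.*generator)(lhs, rhs, /*rhs_known_smi=*/false);
    }

    int main() {
      std::cout << BinaryOpWithFeedback(
          &BinaryOpAssembler::Generate_AddWithFeedback, 2, 3);
    }
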
@@ -959,15 +967,15 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Object> left = LoadRegisterAtOperandIndex(0);
TNode<Object> right = GetAccumulator();
TNode<Context> context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
- VARIABLE(var_left_word32, MachineRepresentation::kWord32);
- VARIABLE(var_right_word32, MachineRepresentation::kWord32);
- VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left);
- VARIABLE(var_right_bigint, MachineRepresentation::kTagged);
+ TVARIABLE(Word32T, var_left_word32);
+ TVARIABLE(Word32T, var_right_word32);
+ TVARIABLE(Object, var_left_bigint, left);
+ TVARIABLE(Object, var_right_bigint);
Label if_left_number(this), do_number_op(this);
Label if_left_bigint(this), do_bigint_op(this);
@@ -1007,14 +1015,16 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
void BitwiseBinaryOpWithSmi(Operation bitwise_op) {
TNode<Object> left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<Smi> right = BytecodeOperandImmSmi(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Smi, var_left_feedback);
- VARIABLE(var_left_word32, MachineRepresentation::kWord32);
- VARIABLE(var_left_bigint, MachineRepresentation::kTagged);
+ TVARIABLE(Word32T, var_left_word32);
+ // TODO(v8:6949): var_left_bigint should be BigInt, but before that we need
+ // to clean up TaggedToWord32OrBigIntWithFeedback and related methods.
+ TVARIABLE(Object, var_left_bigint);
Label do_smi_op(this), if_bigint_mix(this);
TaggedToWord32OrBigIntWithFeedback(context, left, &do_smi_op,
@@ -1115,13 +1125,15 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
// Perform bitwise-not on the accumulator.
IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
TNode<Object> operand = GetAccumulator();
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_word32, MachineRepresentation::kWord32);
+ TVARIABLE(Word32T, var_word32);
TVARIABLE(Smi, var_feedback);
- VARIABLE(var_bigint, MachineRepresentation::kTagged);
+ // TODO(v8:6949): var_bigint should be BigInt, but before that we need to
+ // clean up TaggedToWord32OrBigIntWithFeedback and related methods.
+ TVARIABLE(Object, var_bigint);
Label if_number(this), if_bigint(this);
TaggedToWord32OrBigIntWithFeedback(context, operand, &if_number, &var_word32,
&if_bigint, &var_bigint, &var_feedback);
@@ -1184,20 +1196,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
virtual ~UnaryNumericOpAssembler() = default;
// Must return a tagged value.
- virtual TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) = 0;
+ virtual TNode<Number> SmiOp(TNode<Smi> smi_value,
+ TVariable<Smi>* var_feedback, Label* do_float_op,
+ TVariable<Float64T>* var_float) = 0;
// Must return a Float64 value.
- virtual Node* FloatOp(Node* float_value) = 0;
+ virtual TNode<Float64T> FloatOp(TNode<Float64T> float_value) = 0;
// Must return a tagged value.
- virtual Node* BigIntOp(Node* bigint_value) = 0;
+ virtual TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) = 0;
void UnaryOpWithFeedback() {
- VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator());
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_float_value, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_value, GetAccumulator());
+ TVARIABLE(Object, var_result);
+ TVARIABLE(Float64T, var_float_value);
TVARIABLE(Smi, var_feedback, SmiConstant(BinaryOperationFeedback::kNone));
- Variable* loop_vars[] = {&var_value, &var_feedback};
- Label start(this, arraysize(loop_vars), loop_vars), end(this);
+ Label start(this, {&var_value, &var_feedback}), end(this);
Label do_float_op(this, &var_float_value);
Goto(&start);
// We might have to try again after ToNumeric conversion.
@@ -1206,9 +1218,11 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
Label if_smi(this), if_heapnumber(this), if_oddball(this);
Label if_bigint(this, Label::kDeferred);
Label if_other(this, Label::kDeferred);
- Node* value = var_value.value();
+ TNode<Object> value = var_value.value();
GotoIf(TaggedIsSmi(value), &if_smi);
- TNode<Map> map = LoadMap(value);
+
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &if_heapnumber);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
@@ -1217,20 +1231,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&if_smi);
{
- var_result.Bind(
- SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value));
+ var_result =
+ SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value);
Goto(&end);
}
BIND(&if_heapnumber);
{
- var_float_value.Bind(LoadHeapNumberValue(value));
+ var_float_value = LoadHeapNumberValue(value_heap_object);
Goto(&do_float_op);
}
BIND(&if_bigint);
{
- var_result.Bind(BigIntOp(value));
+ var_result = BigIntOp(value_heap_object);
CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
Goto(&end);
}
@@ -1244,7 +1258,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
- var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_value =
+ LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
Goto(&start);
}
@@ -1256,8 +1271,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
CSA_ASSERT(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
- var_value.Bind(
- CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value));
+ var_value = CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(),
+ value_heap_object);
Goto(&start);
}
}
@@ -1265,13 +1280,13 @@ class UnaryNumericOpAssembler : public InterpreterAssembler {
BIND(&do_float_op);
{
CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
- var_result.Bind(
- AllocateHeapNumberWithValue(FloatOp(var_float_value.value())));
+ var_result =
+ AllocateHeapNumberWithValue(FloatOp(var_float_value.value()));
Goto(&end);
}
BIND(&end);
- Node* slot_index = BytecodeOperandIdx(0);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(var_result.value());
@@ -1285,8 +1300,9 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
OperandScale operand_scale)
: UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
- TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) override {
+ TNode<Number> SmiOp(TNode<Smi> smi_value, TVariable<Smi>* var_feedback,
+ Label* do_float_op,
+ TVariable<Float64T>* var_float) override {
TVARIABLE(Number, var_result);
Label if_zero(this), if_min_smi(this), end(this);
// Return -0 if operand is 0.
@@ -1306,18 +1322,20 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler {
Goto(&end);
BIND(&if_min_smi);
- var_float->Bind(SmiToFloat64(smi_value));
+ *var_float = SmiToFloat64(smi_value);
Goto(do_float_op);
BIND(&end);
return var_result.value();
}
- Node* FloatOp(Node* float_value) override { return Float64Neg(float_value); }
+ TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
+ return Float64Neg(float_value);
+ }
- Node* BigIntOp(Node* bigint_value) override {
- return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(Operation::kNegate));
+ TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(Operation::kNegate)));
}
};
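
Because UnaryNumericOpAssembler's pure virtuals now take typed parameters, every override has to be retyped in the same commit; the override keyword turns any mismatch into a build error, which is why NegateAssemblerImpl and IncDecAssembler change together here. A standalone model with stand-in types:

    struct Float64T {};
    template <typename T> struct TVariable { T value{}; };

    class UnaryNumericOpAssembler {
     public:
      virtual ~UnaryNumericOpAssembler() = default;
      virtual double FloatOp(double float_value) = 0;
      virtual void SmiOp(int smi_value, TVariable<Float64T>* var_float) = 0;
    };

    class NegateAssemblerImpl final : public UnaryNumericOpAssembler {
     public:
      double FloatOp(double float_value) override { return -float_value; }
      // Declaring SmiOp(int, void*) here instead would fail to compile.
      void SmiOp(int smi_value, TVariable<Float64T>* var_float) override {
        (void)smi_value;
        var_float->value = Float64T{};  // hand off to the float path
      }
    };

    int main() {
      NegateAssemblerImpl neg;
      return neg.FloatOp(2.5) == -2.5 ? 0 : 1;
    }
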
@@ -1381,8 +1399,9 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
return op_;
}
- TNode<Number> SmiOp(TNode<Smi> value, Variable* var_feedback,
- Label* do_float_op, Variable* var_float) override {
+ TNode<Number> SmiOp(TNode<Smi> value, TVariable<Smi>* var_feedback,
+ Label* do_float_op,
+ TVariable<Float64T>* var_float) override {
TNode<Smi> one = SmiConstant(1);
Label if_overflow(this), if_notoverflow(this);
TNode<Smi> result = op() == Operation::kIncrement
@@ -1392,7 +1411,7 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
BIND(&if_overflow);
{
- var_float->Bind(SmiToFloat64(value));
+ *var_float = SmiToFloat64(value);
Goto(do_float_op);
}
@@ -1401,15 +1420,15 @@ class IncDecAssembler : public UnaryNumericOpAssembler {
return result;
}
- Node* FloatOp(Node* float_value) override {
+ TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
return op() == Operation::kIncrement
? Float64Add(float_value, Float64Constant(1.0))
: Float64Sub(float_value, Float64Constant(1.0));
}
- Node* BigIntOp(Node* bigint_value) override {
- return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(op()));
+ TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
+ SmiConstant(op())));
}
void IncWithFeedback() {
@@ -1442,17 +1461,17 @@ IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); }
// accumulator to a boolean value if required.
IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Variable result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
Label if_true(this), if_false(this), end(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
BIND(&if_false);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&end);
@@ -1466,20 +1485,20 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
// value.
IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Variable result(this, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
Label if_true(this), if_false(this), end(this);
TNode<Oddball> true_value = TrueConstant();
TNode<Oddball> false_value = FalseConstant();
Branch(TaggedEqual(value, true_value), &if_true, &if_false);
BIND(&if_true);
{
- result.Bind(false_value);
+ result = false_value;
Goto(&end);
}
BIND(&if_false);
{
CSA_ASSERT(this, TaggedEqual(value, false_value));
- result.Bind(true_value);
+ result = true_value;
Goto(&end);
}
BIND(&end);
@@ -1493,7 +1512,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
// object in the accumulator.
IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* result = Typeof(value);
+ TNode<String> result = Typeof(value);
SetAccumulator(result);
Dispatch();
}
@@ -1550,7 +1569,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
void JSCall(ConvertReceiverMode receiver_mode) {
TNode<Object> function = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1583,7 +1602,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
TNode<Object> function = LoadRegisterAtOperandIndex(0);
- Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(kSlotOperandIndex);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1598,26 +1617,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
case 1:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
break;
case 3:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)),
- static_cast<Node*>(
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
break;
default:
UNREACHABLE();
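
The static_cast<Node*> clutter disappears because CallJSAndDispatch accepts typed node arguments through a variadic template. A standalone model of that call shape (stand-in types; the real helper also threads the function, context, and receiver mode):

    #include <cstdio>

    struct Node {};
    template <typename T> struct TNode { Node* node = nullptr; };
    struct Object {};

    template <typename... TArgs>
    void CallJSAndDispatch(int arg_count, TArgs... args) {
      static_assert(sizeof...(TArgs) <= 3, "handler unrolls at most 3 args");
      std::printf("dispatch: %d declared, %d passed\n", arg_count,
                  static_cast<int>(sizeof...(args)));
    }

    int main() {
      Node a, b;
      TNode<Object> arg0{&a}, arg1{&b};
      CallJSAndDispatch(2, arg0, arg1);  // no per-argument static_cast needed
    }
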
@@ -1676,7 +1689,7 @@ IGNITION_HANDLER(CallNoFeedback, InterpreterJSCallAssembler) {
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
- Node* function_id = BytecodeOperandRuntimeId(0);
+ TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
Node* result = CallRuntimeN(function_id, context, args);
@@ -1690,10 +1703,11 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
- Node* function_id = BytecodeOperandIntrinsicId(0);
+ TNode<Uint32T> function_id = BytecodeOperandIntrinsicId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- Node* result = GenerateInvokeIntrinsic(this, function_id, context, args);
+ TNode<Object> result =
+ GenerateInvokeIntrinsic(this, function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1706,13 +1720,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
// <first_return + 1>
IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the runtime function.
- Node* function_id = BytecodeOperandRuntimeId(0);
+ TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
Node* result_pair = CallRuntimeN(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- Node* result0 = Projection(0, result_pair);
- Node* result1 = Projection(1, result_pair);
+ TNode<Object> result0 = CAST(Projection(0, result_pair));
+ TNode<Object> result1 = CAST(Projection(1, result_pair));
StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
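
CallRuntimeForPair's two projections come back untyped and are CAST to Object before the register pair is stored. A loose standalone analogy using std::any as the untyped slot (the real projections are graph nodes; std::any and the typeid check are only a model):

    #include <any>
    #include <cassert>
    #include <string>
    #include <utility>

    using ResultPair = std::pair<std::any, std::any>;

    ResultPair CallRuntimeN() {
      return {std::string("first"), std::string("second")};
    }

    template <typename T>
    T Projection(int index, const ResultPair& pair) {  // CAST-like access
      return std::any_cast<T>(index == 0 ? pair.first : pair.second);
    }

    int main() {
      ResultPair result_pair = CallRuntimeN();
      auto result0 = Projection<std::string>(0, result_pair);
      auto result1 = Projection<std::string>(1, result_pair);
      assert(result0 == "first" && result1 == "second");
      return 0;
    }
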
@@ -1722,12 +1736,12 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
- Node* context_index = BytecodeOperandNativeContextIndex(0);
+ TNode<IntPtrT> context_index = Signed(BytecodeOperandNativeContextIndex(0));
RegListNodePair args = GetRegisterListAtOperandIndex(1);
// Get the function to call from the native context.
TNode<Context> context = GetContext();
- TNode<Context> native_context = LoadNativeContext(context);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Object> function = LoadContextElement(native_context, context_index);
// Call the function.
@@ -1744,7 +1758,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
TNode<Object> callable = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
@@ -1763,11 +1777,11 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
TNode<Object> new_target = GetAccumulator();
TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Node* result = ConstructWithSpread(constructor, context, new_target, args,
- slot_id, feedback_vector);
+ TNode<Object> result = ConstructWithSpread(
+ constructor, context, new_target, args, slot_id, maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1782,11 +1796,11 @@ IGNITION_HANDLER(Construct, InterpreterAssembler) {
TNode<Object> new_target = GetAccumulator();
TNode<Object> constructor = LoadRegisterAtOperandIndex(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
- Node* slot_id = BytecodeOperandIdx(3);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Node* result = Construct(constructor, context, new_target, args, slot_id,
- feedback_vector);
+ TNode<Object> result = Construct(constructor, context, new_target, args,
+ slot_id, maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1802,8 +1816,8 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
TNode<Object> rhs = GetAccumulator();
TNode<Context> context = GetContext();
- Variable var_type_feedback(this, MachineRepresentation::kTagged);
- Node* result;
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result;
switch (compare_op) {
case Operation::kEqual:
result = Equal(lhs, rhs, context, &var_type_feedback);
@@ -1822,7 +1836,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
UNREACHABLE();
}
- Node* slot_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_index);
@@ -1894,14 +1908,14 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
TNode<Object> name = LoadRegisterAtOperandIndex(0);
TNode<Object> object = GetAccumulator();
- Node* raw_slot = BytecodeOperandIdx(1);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
- smi_slot, feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
+ smi_slot, feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1913,15 +1927,16 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> callable = GetAccumulator();
- Node* slot_id = BytecodeOperandIdx(1);
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
Label feedback_done(this);
- GotoIf(IsUndefined(feedback_vector), &feedback_done);
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
// Record feedback for the {callable} in the {feedback_vector}.
- CollectCallableFeedback(callable, context, feedback_vector, slot_id);
+ CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector),
+ slot_id);
Goto(&feedback_done);
BIND(&feedback_done);
@@ -1980,7 +1995,7 @@ IGNITION_HANDLER(TestUndefined, InterpreterAssembler) {
// by |literal_flag|.
IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
TNode<Object> object = GetAccumulator();
- Node* literal_flag = BytecodeOperandFlag(0);
+ TNode<Uint32T> literal_flag = BytecodeOperandFlag(0);
#define MAKE_LABEL(name, lower_case) Label if_##lower_case(this);
TYPEOF_LITERAL_LIST(MAKE_LABEL)
@@ -2097,7 +2112,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
//
// Jump by the number of bytes represented by the immediate operand |imm|.
IGNITION_HANDLER(Jump, InterpreterAssembler) {
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
}
@@ -2117,7 +2132,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -2141,7 +2156,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -2164,7 +2179,7 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is true when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2195,7 +2210,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
// referenced by the accumulator is false when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2226,7 +2241,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2246,7 +2261,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2266,7 +2281,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2286,7 +2301,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2314,7 +2329,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNull, InterpreterAssembler) {
Dispatch();
BIND(&do_jump);
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
}
@@ -2342,7 +2357,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNullConstant, InterpreterAssembler) {
// referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- Node* relative_jump = BytecodeOperandUImmWord(0);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2383,9 +2398,9 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
- Node* relative_jump = BytecodeOperandUImmWord(0);
- Node* loop_depth = BytecodeOperandImm(1);
- Node* osr_level = LoadOsrNestingLevel();
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
+ TNode<Int32T> loop_depth = BytecodeOperandImm(1);
+ TNode<Int8T> osr_level = LoadOsrNestingLevel();
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
@@ -2415,9 +2430,9 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
// next bytecode.
IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
TNode<Object> acc = GetAccumulator();
- Node* table_start = BytecodeOperandIdx(0);
- Node* table_length = BytecodeOperandUImmWord(1);
- Node* case_value_base = BytecodeOperandImmIntPtr(2);
+ TNode<UintPtrT> table_start = BytecodeOperandIdx(0);
+ TNode<UintPtrT> table_length = BytecodeOperandUImmWord(1);
+ TNode<IntPtrT> case_value_base = BytecodeOperandImmIntPtr(2);
Label fall_through(this);
@@ -2426,7 +2441,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// accumulator values.
CSA_ASSERT(this, TaggedIsSmi(acc));
- TNode<WordT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
+ TNode<IntPtrT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through);
GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through);
TNode<WordT> entry = IntPtrAdd(table_start, case_value);
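
The guards above are the standard jump-table shape: bias the accumulator by the case base, then fall through unless the result lands in [0, table_length). A standalone sketch of the same control flow:

    #include <cstdio>

    int main() {
      const int table_length = 3;
      const int case_value_base = 10;
      const char* table[] = {"case 10", "case 11", "case 12"};

      for (int acc : {9, 10, 12, 13}) {
        int case_value = acc - case_value_base;  // the IntPtrSub above
        if (case_value < 0 || case_value >= table_length) {
          std::printf("acc=%d: fall through\n", acc);
          continue;
        }
        std::printf("acc=%d: jump to %s\n", acc, table[case_value]);
      }
    }
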
@@ -2442,17 +2457,18 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
- TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2));
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<Smi> flags =
+ SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
TNode<Context> context = GetContext();
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(JSRegExp, result);
ConstructorBuiltinsAssembler constructor_assembler(state());
- result.Bind(constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context));
+ result = constructor_assembler.EmitCreateRegExpLiteral(
+ feedback_vector, slot_id, pattern, flags, context);
SetAccumulator(result.value());
Dispatch();
}
@@ -2463,9 +2479,9 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
TNode<Context> context = GetContext();
- Node* bytecode_flags = BytecodeOperandFlag(2);
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
// No feedback, so handle it as a slow case.
@@ -2478,8 +2494,8 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
BIND(&fast_shallow_clone);
{
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateShallowArrayLiteral(
- feedback_vector, slot_id, context, &call_runtime,
+ TNode<JSArray> result = constructor_assembler.EmitCreateShallowArrayLiteral(
+ CAST(feedback_vector), slot_id, context, &call_runtime,
TRACK_ALLOCATION_SITE);
SetAccumulator(result);
Dispatch();
@@ -2487,14 +2503,14 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
BIND(&call_runtime);
{
- TNode<WordT> flags_raw =
+ TNode<UintPtrT> flags_raw =
DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Object> constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
TNode<Object> result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(slot_id), constant_elements, flags);
+ SmiTag(Signed(slot_id)), constant_elements, flags);
SetAccumulator(result);
Dispatch();
}
@@ -2504,26 +2520,26 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
//
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(0);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(0);
TNode<Context> context = GetContext();
Label no_feedback(this, Label::kDeferred), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
- GotoIf(IsUndefined(feedback_vector), &no_feedback);
+ TVARIABLE(JSArray, result);
+ GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
ConstructorBuiltinsAssembler constructor_assembler(state());
- result.Bind(constructor_assembler.EmitCreateEmptyArrayLiteral(
- feedback_vector, slot_id, context));
+ result = constructor_assembler.EmitCreateEmptyArrayLiteral(
+ CAST(maybe_feedback_vector), slot_id, context);
Goto(&end);
BIND(&no_feedback);
{
TNode<Map> array_map = LoadJSArrayElementsMap(GetInitialFastElementsKind(),
LoadNativeContext(context));
- result.Bind(AllocateJSArray(GetInitialFastElementsKind(), array_map,
- SmiConstant(0), SmiConstant(0), nullptr,
- ParameterMode::SMI_PARAMETERS));
+ result =
+ AllocateJSArray(GetInitialFastElementsKind(), array_map, SmiConstant(0),
+ SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS);
Goto(&end);
}
@@ -2551,8 +2567,8 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot_id = BytecodeOperandIdx(1);
- Node* bytecode_flags = BytecodeOperandFlag(2);
+ TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
// No feedback, so handle it as a slow case.
@@ -2567,8 +2583,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
{
// If we can do a fast clone do the fast-path in CreateShallowObjectLiteral.
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
- feedback_vector, slot_id, &if_not_fast_clone);
+ TNode<HeapObject> result =
+ constructor_assembler.EmitCreateShallowObjectLiteral(
+ CAST(feedback_vector), slot_id, &if_not_fast_clone);
SetAccumulator(result);
Dispatch();
}
@@ -2576,18 +2593,18 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
- Node* object_boilerplate_description =
- LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<ObjectBoilerplateDescription> object_boilerplate_description =
+ CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
- TNode<WordT> flags_raw =
+ TNode<UintPtrT> flags_raw =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- TNode<Object> result =
- CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(slot_id), object_boilerplate_description, flags);
+ TNode<Object> result = CallRuntime(Runtime::kCreateObjectLiteral, context,
+ feedback_vector, SmiTag(Signed(slot_id)),
+ object_boilerplate_description, flags);
SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2600,7 +2617,8 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* result = constructor_assembler.EmitCreateEmptyObjectLiteral(context);
+ TNode<JSObject> result =
+ constructor_assembler.EmitCreateEmptyObjectLiteral(context);
SetAccumulator(result);
Dispatch();
}
@@ -2611,18 +2629,18 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
// {source}, converting getters into data properties.
IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
TNode<Object> source = LoadRegisterAtOperandIndex(0);
- Node* bytecode_flags = BytecodeOperandFlag(1);
- TNode<WordT> raw_flags =
+ TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1);
+ TNode<UintPtrT> raw_flags =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
TNode<Smi> smi_flags = SmiTag(Signed(raw_flags));
- Node* raw_slot = BytecodeOperandIdx(2);
+ TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
TNode<Smi> smi_slot = SmiTag(raw_slot);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Variable var_result(this, MachineRepresentation::kTagged);
- var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source,
- smi_flags, smi_slot, maybe_feedback_vector));
+ TVARIABLE(Object, var_result);
+ var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags,
+ smi_slot, maybe_feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -2633,14 +2651,14 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* slot = BytecodeOperandIdx(1);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<UintPtrT> slot = BytecodeOperandIdx(1);
Label call_runtime(this, Label::kDeferred);
- GotoIf(IsUndefined(feedback_vector), &call_runtime);
+ GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime);
TNode<Object> cached_value =
- CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS));
+ CAST(LoadFeedbackVectorSlot(CAST(maybe_feedback_vector), slot));
GotoIf(TaggedEqual(cached_value, SmiConstant(0)), &call_runtime);
@@ -2649,8 +2667,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
BIND(&call_runtime);
{
- Node* description = LoadConstantPoolEntryAtOperandIndex(0);
- TNode<Smi> slot_smi = SmiTag(slot);
+ TNode<Object> description = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Smi> slot_smi = SmiTag(Signed(slot));
TNode<JSFunction> closure =
CAST(LoadRegister(Register::function_closure()));
TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
@@ -2660,8 +2678,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
description, shared_info, slot_smi);
Label end(this);
- GotoIf(IsUndefined(feedback_vector), &end);
- StoreFeedbackVectorSlot(feedback_vector, slot, result);
+ GotoIf(IsUndefined(maybe_feedback_vector), &end);
+ StoreFeedbackVectorSlot(CAST(maybe_feedback_vector), slot, result);
Goto(&end);
Bind(&end);
@@ -2675,10 +2693,10 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with pretenuring controlled by |flags|.
IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
- Node* shared = LoadConstantPoolEntryAtOperandIndex(0);
- Node* flags = BytecodeOperandFlag(2);
+ TNode<Object> shared = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Uint32T> flags = BytecodeOperandFlag(2);
TNode<Context> context = GetContext();
- Node* slot = BytecodeOperandIdx(1);
+ TNode<UintPtrT> slot = BytecodeOperandIdx(1);
Label if_undefined(this);
TNode<ClosureFeedbackCellArray> feedback_cell_array =
@@ -2727,7 +2745,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
//
// Creates a new block context with the scope info constant at |index|.
IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Context> context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushBlockContext, context, scope_info));
Dispatch();
@@ -2739,7 +2757,7 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
// and the ScopeInfo at |scope_info_idx|.
IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
TNode<Object> exception = LoadRegisterAtOperandIndex(0);
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
SetAccumulator(
CallRuntime(Runtime::kPushCatchContext, context, exception, scope_info));
@@ -2750,8 +2768,8 @@ IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
//
// Creates a new context with number of |slots| for the function closure.
IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
- Node* scope_info_idx = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx));
TNode<Uint32T> slots = BytecodeOperandUImm(1);
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
@@ -2764,8 +2782,8 @@ IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
//
// Creates a new context with number of |slots| for an eval closure.
IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
- Node* scope_info_idx = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx));
TNode<Uint32T> slots = BytecodeOperandUImm(1);
TNode<Context> context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
@@ -2780,7 +2798,7 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
// with-statement with the object in |register|.
IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
- Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
+ TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Context> context = GetContext();
SetAccumulator(
CallRuntime(Runtime::kPushWithContext, context, object, scope_info));
@@ -2802,8 +2820,8 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// duplicate parameters.
TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
closure, JSFunction::kSharedFunctionInfoOffset);
- Node* flags = LoadObjectField(shared_info, SharedFunctionInfo::kFlagsOffset,
- MachineType::Uint32());
+ TNode<Uint32T> flags =
+ LoadObjectField<Uint32T>(shared_info, SharedFunctionInfo::kFlagsOffset);
TNode<BoolT> has_duplicate_parameters =
IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(flags);
Branch(has_duplicate_parameters, &if_duplicate_parameters,
@@ -2812,7 +2830,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
BIND(&if_not_duplicate_parameters);
{
ArgumentsBuiltinsAssembler constructor_assembler(state());
- Node* result =
+ TNode<JSObject> result =
constructor_assembler.EmitFastNewSloppyArguments(context, closure);
SetAccumulator(result);
Dispatch();
@@ -2832,9 +2850,9 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// Creates a new unmapped arguments object.
IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
TNode<Context> context = GetContext();
- TNode<Object> closure = LoadRegister(Register::function_closure());
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
ArgumentsBuiltinsAssembler builtins_assembler(state());
- Node* result =
+ TNode<JSObject> result =
builtins_assembler.EmitFastNewStrictArguments(context, closure);
SetAccumulator(result);
Dispatch();
@@ -2844,10 +2862,11 @@ IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) {
//
// Creates a new rest parameter array.
IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
- TNode<Object> closure = LoadRegister(Register::function_closure());
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
TNode<Context> context = GetContext();
ArgumentsBuiltinsAssembler builtins_assembler(state());
- Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
+ TNode<JSObject> result =
+ builtins_assembler.EmitFastNewRestParameter(context, closure);
SetAccumulator(result);
Dispatch();
}
@@ -2868,7 +2887,7 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
TNode<ExternalReference> pending_message = ExternalConstant(
ExternalReference::address_of_pending_message_obj(isolate()));
- Node* previous_message = Load(MachineType::TaggedPointer(), pending_message);
+ TNode<HeapObject> previous_message = Load<HeapObject>(pending_message);
TNode<Object> new_message = GetAccumulator();
StoreFullTaggedNoWriteBarrier(pending_message, new_message);
SetAccumulator(previous_message);
@@ -2903,8 +2922,8 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
//
// Aborts execution (via a call to the runtime function).
IGNITION_HANDLER(Abort, InterpreterAssembler) {
- Node* reason = BytecodeOperandIdx(0);
- CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(reason));
+ TNode<UintPtrT> reason = BytecodeOperandIdx(0);
+ CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(Signed(reason)));
Unreachable();
}
@@ -2929,7 +2948,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
BIND(&throw_error);
{
- Node* name = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
CallRuntime(Runtime::kThrowAccessedUninitializedVariable, GetContext(),
name);
// We shouldn't ever return from a throw.
@@ -2995,7 +3014,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator(); \
TNode<Object> result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
- Node* return_value = Projection(0, result_pair); \
+ TNode<Object> return_value = CAST(Projection(0, result_pair)); \
TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
@@ -3010,7 +3029,7 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
// coverage.
IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
TNode<Object> closure = LoadRegister(Register::function_closure());
- Node* coverage_array_slot = BytecodeOperandIdxSmi(0);
+ TNode<Smi> coverage_array_slot = BytecodeOperandIdxSmi(0);
TNode<Context> context = GetContext();
CallBuiltin(Builtins::kIncBlockCounter, context, closure,
@@ -3025,11 +3044,11 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
+ TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0));
TNode<Context> context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
- Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
+ TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
SetAccumulator(receiver_map);
Dispatch();
@@ -3060,7 +3079,7 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
// The {enumerator} is either a Map or a FixedArray.
TNode<HeapObject> enumerator = CAST(GetAccumulator());
- Node* vector_index = BytecodeOperandIdx(1);
+ TNode<UintPtrT> vector_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
// Check if we're using an enum cache.
@@ -3091,8 +3110,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
// Construct the cache info triple.
- Node* cache_type = enumerator;
- Node* cache_array = enum_keys;
+ TNode<Map> cache_type = map_enumerator;
+ TNode<FixedArray> cache_array = enum_keys;
TNode<Smi> cache_length = SmiTag(Signed(enum_length));
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
@@ -3108,8 +3127,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
vector_index);
// Construct the cache info triple.
- Node* cache_type = array_enumerator;
- Node* cache_array = array_enumerator;
+ TNode<FixedArray> cache_type = array_enumerator;
+ TNode<FixedArray> cache_array = array_enumerator;
TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator);
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
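Both branches above store the same triple into three consecutive registers; only the shapes differ. With a usable enum cache the type slot holds the receiver's Map; on the generic path the key array doubles as its own type marker, so the map comparison in ForInNext fails and execution falls through to key filtering. A hedged, V8-free sketch of the two shapes:

#include <cstddef>
#include <string>
#include <vector>

// Stand-in types; V8 uses Map and FixedArray objects on the managed heap.
struct Map {};
struct FixedArray { std::vector<std::string> keys; };

// The triple written by StoreRegisterTripleAtOperandIndex, modeled as a
// plain struct for illustration.
struct ForInCache {
  const void* cache_type;         // Map* (enum-cache path) or the key array
  const FixedArray* cache_array;  // keys to enumerate
  std::size_t cache_length;
};

ForInCache EnumCachePath(const Map* map, const FixedArray* enum_keys) {
  return {map, enum_keys, enum_keys->keys.size()};
}

ForInCache GenericPath(const FixedArray* array) {
  // Both slots hold the key array; it never equals a receiver's Map, so
  // ForInNext always takes its filtering slow path here.
  return {array, array, array->keys.size()};
}

int main() {
  FixedArray keys{{"a", "b"}};
  Map map;
  ForInCache fast = EnumCachePath(&map, &keys);
  ForInCache slow = GenericPath(&keys);
  (void)fast; (void)slow;
}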
@@ -3125,7 +3144,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
TNode<Object> cache_type;
TNode<Object> cache_array;
std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
- Node* vector_index = BytecodeOperandIdx(3);
+ TNode<UintPtrT> vector_index = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
// Load the next key from the enumeration array.
@@ -3195,21 +3214,22 @@ IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
// GetIterator <object>
//
-// Retrieves the object[Symbol.iterator] method and stores the result
-// in the accumulator
-// TODO(swapnilgaikwad): Extend the functionality of the bytecode to call
-// iterator method for an object
+// Retrieves the object[Symbol.iterator] method, calls it, and stores
+// the result in the accumulator. If the result is not a JSReceiver,
+// throws a SymbolIteratorInvalid runtime exception.
IGNITION_HANDLER(GetIterator, InterpreterAssembler) {
TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
TNode<Context> context = GetContext();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- Node* feedback_slot = BytecodeOperandIdx(1);
- TNode<Smi> smi_slot = SmiTag(feedback_slot);
+ TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1));
+ TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2));
+ TNode<Smi> load_slot_smi = SmiTag(load_feedback_slot);
+ TNode<Smi> call_slot_smi = SmiTag(call_feedback_slot);
- TNode<Object> result =
+ TNode<Object> iterator =
CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
- smi_slot, feedback_vector);
- SetAccumulator(result);
+ load_slot_smi, call_slot_smi, feedback_vector);
+ SetAccumulator(iterator);
Dispatch();
}
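GetIterator now carries two feedback slots, one for the [Symbol.iterator] property load and one for the subsequent call, matching the extended semantics in the updated comment. A plain-C++ sketch of that protocol (illustrative names, no V8 types):

#include <stdexcept>

struct Value;
using IteratorMethod = Value (*)();

// A value is "iterable" here if it exposes an iterator method; the method's
// result must itself be an object, otherwise the equivalent of
// SymbolIteratorInvalid is thrown.
struct Value {
  bool is_object = false;
  IteratorMethod iterator_method = nullptr;  // stands in for obj[Symbol.iterator]
};

Value GetIterator(const Value& receiver) {
  if (!receiver.iterator_method)             // the "load" feedback slot
    throw std::runtime_error("undefined is not a function");
  Value iterator = receiver.iterator_method();  // the "call" feedback slot
  if (!iterator.is_object)
    throw std::runtime_error("SymbolIteratorInvalid");
  return iterator;
}

int main() {
  Value obj;
  obj.iterator_method = [] { Value it; it.is_object = true; return it; };
  Value it = GetIterator(obj);
  (void)it;
}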
@@ -3249,7 +3269,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
TNode<Context> context = GetContext();
RegListNodePair registers = GetRegisterListAtOperandIndex(1);
- Node* suspend_id = BytecodeOperandUImmSmi(3);
+ TNode<Smi> suspend_id = BytecodeOperandUImmSmi(3);
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3297,10 +3317,10 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
CAST(LoadObjectField(generator, JSGeneratorObject::kContextOffset));
SetContext(context);
- Node* table_start = BytecodeOperandIdx(1);
+ TNode<UintPtrT> table_start = BytecodeOperandIdx(1);
// TODO(leszeks): table_length is only used for a CSA_ASSERT; we don't
// actually need it otherwise.
- Node* table_length = BytecodeOperandUImmWord(2);
+ TNode<UintPtrT> table_length = BytecodeOperandUImmWord(2);
// The state must be a Smi.
CSA_ASSERT(this, TaggedIsSmi(state));
@@ -3350,14 +3370,15 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
} // namespace
-Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
+Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
+ Bytecode bytecode,
OperandScale operand_scale,
int builtin_index,
const AssemblerOptions& options) {
Zone zone(isolate->allocator(), ZONE_NAME);
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
- Bytecodes::ToString(bytecode),
+ debug_name,
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
: PoisoningMitigationLevel::kDontPoison,
@@ -3377,7 +3398,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
StdoutStream os;
- code->Disassemble(Bytecodes::ToString(bytecode), os);
+ code->Disassemble(Bytecodes::ToString(bytecode), os, isolate);
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index a41e89f250..263f02ba39 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -15,7 +15,9 @@ struct AssemblerOptions;
namespace interpreter {
-extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
+extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate,
+ const char* debug_name,
+ Bytecode bytecode,
OperandScale operand_scale,
int builtin_index,
const AssemblerOptions& options);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index a329e7189f..f5307762f7 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -21,8 +21,6 @@ namespace internal {
namespace interpreter {
using compiler::Node;
-template <typename T>
-using TNode = compiler::TNode<T>;
class IntrinsicsGenerator {
public:
@@ -31,8 +29,9 @@ class IntrinsicsGenerator {
zone_(assembler->zone()),
assembler_(assembler) {}
- Node* InvokeIntrinsic(Node* function_id, Node* context,
- const InterpreterAssembler::RegListNodePair& args);
+ TNode<Object> InvokeIntrinsic(
+ TNode<Uint32T> function_id, TNode<Context> context,
+ const InterpreterAssembler::RegListNodePair& args);
private:
enum InstanceTypeCompareMode {
@@ -40,17 +39,20 @@ class IntrinsicsGenerator {
kInstanceTypeGreaterThanOrEqual
};
- Node* IsInstanceType(Node* input, int type);
- Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode);
- Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args,
- Node* context, Callable const& callable);
- Node* IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+ TNode<Oddball> IsInstanceType(TNode<Object> input, int type);
+ TNode<BoolT> CompareInstanceType(TNode<HeapObject> map, int type,
+ InstanceTypeCompareMode mode);
+ TNode<Object> IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ Callable const& callable);
+ TNode<Object> IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Builtins::Name name);
- void AbortIfArgCountMismatch(int expected, compiler::TNode<Word32T> actual);
+ void AbortIfArgCountMismatch(int expected, TNode<Word32T> actual);
-#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
- Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context);
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+ TNode<Object> name(const InterpreterAssembler::RegListNodePair& args, \
+ TNode<Context> context);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
@@ -65,21 +67,20 @@ class IntrinsicsGenerator {
DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
-Node* GenerateInvokeIntrinsic(
- InterpreterAssembler* assembler, Node* function_id, Node* context,
- const InterpreterAssembler::RegListNodePair& args) {
+TNode<Object> GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, TNode<Uint32T> function_id,
+ TNode<Context> context, const InterpreterAssembler::RegListNodePair& args) {
IntrinsicsGenerator generator(assembler);
return generator.InvokeIntrinsic(function_id, context, args);
}
#define __ assembler_->
-Node* IntrinsicsGenerator::InvokeIntrinsic(
- Node* function_id, Node* context,
+TNode<Object> IntrinsicsGenerator::InvokeIntrinsic(
+ TNode<Uint32T> function_id, TNode<Context> context,
const InterpreterAssembler::RegListNodePair& args) {
InterpreterAssembler::Label abort(assembler_), end(assembler_);
- InterpreterAssembler::Variable result(assembler_,
- MachineRepresentation::kTagged);
+ InterpreterAssembler::TVariable<Object> result(assembler_);
#define MAKE_LABEL(name, lower_case, count) \
InterpreterAssembler::Label lower_case(assembler_);
@@ -102,9 +103,9 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
} \
- Node* value = name(args, context); \
+ TNode<Object> value = name(args, context); \
if (value) { \
- result.Bind(value); \
+ result = value; \
__ Goto(&end); \
} \
}
@@ -114,7 +115,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
__ BIND(&abort);
{
__ Abort(AbortReason::kUnexpectedFunctionIDForInvokeIntrinsic);
- result.Bind(__ UndefinedConstant());
+ result = __ UndefinedConstant();
__ Goto(&end);
}
@@ -122,8 +123,8 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(
return result.value();
}
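Alongside the TNode changes, untyped Variable plus Bind() gives way to TVariable<T> with plain assignment. A minimal stand-in showing the ergonomic difference (not V8's implementation, which also participates in the assembler's phi construction):

#include <iostream>
#include <string>

// Old style, roughly: an untyped variable bound via Bind().
class Variable {
 public:
  void Bind(const std::string& value) { value_ = value; }
  const std::string& value() const { return value_; }
 private:
  std::string value_;
};

// New style: the element type is part of the variable's type, and
// assignment replaces Bind().
template <typename T>
class TVariable {
 public:
  TVariable& operator=(const T& value) { value_ = value; return *this; }
  const T& value() const { return value_; }
 private:
  T value_{};
};

int main() {
  Variable untyped;
  untyped.Bind("result");           // old: Bind()
  TVariable<std::string> typed;
  typed = "result";                 // new: operator=
  std::cout << untyped.value() << " " << typed.value() << "\n";
}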
-Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
- InstanceTypeCompareMode mode) {
+TNode<BoolT> IntrinsicsGenerator::CompareInstanceType(
+ TNode<HeapObject> object, int type, InstanceTypeCompareMode mode) {
TNode<Uint16T> instance_type = __ LoadInstanceType(object);
if (mode == kInstanceTypeEqual) {
@@ -134,39 +135,42 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
}
}
-Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
+TNode<Oddball> IntrinsicsGenerator::IsInstanceType(TNode<Object> input,
+ int type) {
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
[=] {
return __ SelectBooleanConstant(
- CompareInstanceType(input, type, kInstanceTypeEqual));
+ CompareInstanceType(__ CAST(input), type, kInstanceTypeEqual));
});
return result;
}
-Node* IntrinsicsGenerator::IsJSReceiver(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsJSReceiver(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
- [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); });
+ [=] {
+ return __ SelectBooleanConstant(__ IsJSReceiver(__ CAST(input)));
+ });
return result;
}
-Node* IntrinsicsGenerator::IsArray(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsArray(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsSmi(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::IsSmi(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
-Node* IntrinsicsGenerator::IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+TNode<Object> IntrinsicsGenerator::IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
int input_count = param_count + 2; // +2 for target and context
@@ -177,59 +181,60 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall(
stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
stub_args[index++] = context;
- return __ CallStubN(StubCallMode::kCallCodeObject, callable.descriptor(), 1,
- input_count, stub_args);
+ return __ CAST(__ CallStubN(StubCallMode::kCallCodeObject,
+ callable.descriptor(), 1, input_count,
+ stub_args));
}
-Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, Node* context,
+TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
Builtins::Name name) {
Callable callable = Builtins::CallableFor(isolate_, name);
return IntrinsicAsStubCall(args, context, callable);
}
-Node* IntrinsicsGenerator::CopyDataProperties(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CopyDataProperties(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context,
Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties));
}
-Node* IntrinsicsGenerator::CreateIterResultObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateIterResultObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context,
Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
}
-Node* IntrinsicsGenerator::HasProperty(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::HasProperty(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::ToStringRT(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToStringRT(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
}
-Node* IntrinsicsGenerator::ToLength(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToLength(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
-Node* IntrinsicsGenerator::ToObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::ToObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
}
-Node* IntrinsicsGenerator::Call(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::Call(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
// First argument register contains the function target.
- Node* function = __ LoadRegisterFromRegisterList(args, 0);
+ TNode<Object> function = __ LoadRegisterFromRegisterList(args, 0);
// The arguments for the target function start at the second runtime call
// argument.
@@ -249,26 +254,25 @@ Node* IntrinsicsGenerator::Call(
__ CallJSAndDispatch(function, context, target_args,
ConvertReceiverMode::kAny);
- return nullptr; // We never return from the CallJSAndDispatch above.
+ return TNode<Object>(); // We never return from the CallJSAndDispatch above.
}
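Call is the one intrinsic that never produces a value, since CallJSAndDispatch tail-dispatches straight to the next bytecode, so it returns a default-constructed (null) TNode<Object>, which the if (value) check in InvokeIntrinsic skips. A sketch of that sentinel convention:

#include <cassert>

struct Node { int opcode = 0; };
struct Object {};

// Minimal TNode stand-in: a default-constructed TNode wraps no node, and
// testing it in boolean context asks "did this helper produce a value?".
template <typename T>
class TNode {
 public:
  explicit TNode(Node* node = nullptr) : node_(node) {}
  explicit operator bool() const { return node_ != nullptr; }
 private:
  Node* node_;
};

TNode<Object> CallIntrinsic() {
  // Control never returns to the caller in the real handler; the null
  // TNode only tells InvokeIntrinsic not to bind a result.
  return TNode<Object>();
}

int main() {
  TNode<Object> value = CallIntrinsic();
  if (value) {
    assert(false);  // would bind the result and Goto(&end)
  }
}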
-Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
+ InterpreterAssembler::TVariable<Object> return_value(assembler_);
- Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
+ TNode<Object> sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
__ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
- __ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
+ __ GotoIfNot(__ IsJSReceiver(__ CAST(sync_iterator)), &not_receiver);
TNode<Object> const next =
__ GetProperty(context, sync_iterator, factory()->next_string());
- TNode<Context> const native_context = __ LoadNativeContext(context);
+ TNode<NativeContext> const native_context = __ LoadNativeContext(context);
TNode<Map> const map = __ CAST(__ LoadContextElement(
native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
TNode<JSObject> const iterator = __ AllocateJSObjectFromMap(map);
@@ -278,13 +282,13 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
__ StoreObjectFieldNoWriteBarrier(iterator,
JSAsyncFromSyncIterator::kNextOffset, next);
- return_value.Bind(iterator);
+ return_value = iterator;
__ Goto(&done);
__ BIND(&not_receiver);
{
- return_value.Bind(
- __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
+ return_value =
+ __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
// Unreachable due to the throw in the runtime call.
__ Goto(&done);
@@ -294,104 +298,105 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
return return_value.value();
}
-Node* IntrinsicsGenerator::CreateJSGeneratorObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kCreateGeneratorObject);
}
-Node* IntrinsicsGenerator::GeneratorGetResumeMode(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* generator = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<JSGeneratorObject> generator =
+ __ CAST(__ LoadRegisterFromRegisterList(args, 0));
TNode<Object> const value =
__ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorClose(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* generator = __ LoadRegisterFromRegisterList(args, 0);
+TNode<Object> IntrinsicsGenerator::GeneratorClose(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ TNode<JSGeneratorObject> generator =
+ __ CAST(__ LoadRegisterFromRegisterList(args, 0));
__ StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
__ SmiConstant(JSGeneratorObject::kGeneratorClosed));
return __ UndefinedConstant();
}
-Node* IntrinsicsGenerator::GetImportMetaObject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
TNode<Context> const module_context = __ LoadModuleContext(context);
TNode<HeapObject> const module =
__ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
TNode<Object> const import_meta =
__ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- return_value.Bind(import_meta);
+ InterpreterAssembler::TVariable<Object> return_value(assembler_);
+ return_value = import_meta;
InterpreterAssembler::Label end(assembler_);
__ GotoIfNot(__ IsTheHole(import_meta), &end);
- return_value.Bind(__ CallRuntime(Runtime::kGetImportMetaObject, context));
+ return_value = __ CallRuntime(Runtime::kGetImportMetaObject, context);
__ Goto(&end);
__ BIND(&end);
return return_value.value();
}
-Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncFunctionAwaitCaught);
}
-Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncFunctionAwaitUncaught);
}
-Node* IntrinsicsGenerator::AsyncFunctionEnter(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionEnter(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
}
-Node* IntrinsicsGenerator::AsyncFunctionReject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionReject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
}
-Node* IntrinsicsGenerator::AsyncFunctionResolve(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncFunctionResolve(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorAwaitCaught);
}
-Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorAwaitUncaught);
}
-Node* IntrinsicsGenerator::AsyncGeneratorReject(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorReject(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
}
-Node* IntrinsicsGenerator::AsyncGeneratorResolve(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorResolve(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorYield(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
+TNode<Object> IntrinsicsGenerator::AsyncGeneratorYield(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
index fd4e167ed0..f0c22e7a59 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
@@ -16,9 +16,9 @@ class Node;
namespace interpreter {
-extern compiler::Node* GenerateInvokeIntrinsic(
- InterpreterAssembler* assembler, compiler::Node* function_id,
- compiler::Node* context, const InterpreterAssembler::RegListNodePair& args);
+extern TNode<Object> GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, TNode<Uint32T> function_id,
+ TNode<Context> context, const InterpreterAssembler::RegListNodePair& args);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 482ffb7459..6c730d5a59 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -269,7 +269,7 @@ std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals) {
- return base::make_unique<InterpreterCompilationJob>(
+ return std::make_unique<InterpreterCompilationJob>(
parse_info, literal, allocator, eager_inner_literals);
}
@@ -277,10 +277,10 @@ std::unique_ptr<UnoptimizedCompilationJob>
Interpreter::NewSourcePositionCollectionJob(
ParseInfo* parse_info, FunctionLiteral* literal,
Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator) {
- auto job = base::make_unique<InterpreterCompilationJob>(parse_info, literal,
- allocator, nullptr);
+ auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
+ allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
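The final hunk also drops the release()/static_cast round trip: std::unique_ptr already converts implicitly up the class hierarchy on return, so return job; is enough. A self-contained demonstration (class names mirror the real ones, bodies are stubs):

#include <memory>

struct UnoptimizedCompilationJob {
  virtual ~UnoptimizedCompilationJob() = default;
};
struct InterpreterCompilationJob : UnoptimizedCompilationJob {};

std::unique_ptr<UnoptimizedCompilationJob> NewSourcePositionCollectionJob() {
  auto job = std::make_unique<InterpreterCompilationJob>();
  // unique_ptr<Derived> converts to unique_ptr<Base> implicitly, so no
  // release()/static_cast round trip is needed.
  return job;
}

int main() { auto job = NewSourcePositionCollectionJob(); }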
void Interpreter::ForEachBytecode(