diff options
author | Myles Borins <mylesborins@google.com> | 2018-04-10 21:39:51 -0400 |
---|---|---|
committer | Myles Borins <mylesborins@google.com> | 2018-04-11 13:22:42 -0400 |
commit | 12a1b9b8049462e47181a298120243dc83e81c55 (patch) | |
tree | 8605276308c8b4e3597516961266bae1af57557a /deps/v8/src/interpreter | |
parent | 78cd8263354705b767ef8c6a651740efe4931ba0 (diff) | |
download | android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.tar.gz android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.tar.bz2 android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.zip |
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
22 files changed, 1007 insertions, 916 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc index 784bb14eb6..2a5923b2a4 100644 --- a/deps/v8/src/interpreter/bytecode-array-accessor.cc +++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc @@ -4,6 +4,7 @@ #include "src/interpreter/bytecode-array-accessor.h" +#include "src/feedback-vector.h" #include "src/interpreter/bytecode-decoder.h" #include "src/interpreter/interpreter-intrinsics.h" #include "src/objects-inl.h" @@ -125,6 +126,11 @@ uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const { return GetUnsignedOperand(operand_index, operand_type); } +FeedbackSlot BytecodeArrayAccessor::GetSlotOperand(int operand_index) const { + int index = GetIndexOperand(operand_index); + return FeedbackVector::ToSlot(index); +} + Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const { OperandType operand_type = Bytecodes::GetOperandType(current_bytecode(), operand_index); @@ -206,12 +212,18 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const { JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets() const { - DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback); - - uint32_t table_start = GetIndexOperand(0); - uint32_t table_size = GetUnsignedImmediateOperand(1); - int32_t case_value_base = GetImmediateOperand(2); - + uint32_t table_start, table_size; + int32_t case_value_base; + if (current_bytecode() == Bytecode::kSwitchOnGeneratorState) { + table_start = GetIndexOperand(1); + table_size = GetUnsignedImmediateOperand(2); + case_value_base = 0; + } else { + DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback); + table_start = GetIndexOperand(0); + table_size = GetUnsignedImmediateOperand(1); + case_value_base = GetImmediateOperand(2); + } return JumpTableTargetOffsets(this, table_start, table_size, case_value_base); } diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h 
b/deps/v8/src/interpreter/bytecode-array-accessor.h index d585e6dc33..f31d2d0e7f 100644 --- a/deps/v8/src/interpreter/bytecode-array-accessor.h +++ b/deps/v8/src/interpreter/bytecode-array-accessor.h @@ -83,6 +83,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor { uint32_t GetUnsignedImmediateOperand(int operand_index) const; int32_t GetImmediateOperand(int operand_index) const; uint32_t GetIndexOperand(int operand_index) const; + FeedbackSlot GetSlotOperand(int operand_index) const; uint32_t GetRegisterCountOperand(int operand_index) const; Register GetRegisterOperand(int operand_index) const; int GetRegisterOperandRange(int operand_index) const; @@ -130,4 +131,4 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor { } // namespace internal } // namespace v8 -#endif // V8_INTERPRETER_BYTECODE_GRAPH_ACCESSOR_H_ +#endif // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_ diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc index dcbe8029f9..2d156e4095 100644 --- a/deps/v8/src/interpreter/bytecode-array-builder.cc +++ b/deps/v8/src/interpreter/bytecode-array-builder.cc @@ -92,7 +92,7 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) { register_count = register_optimizer_->maxiumum_register_index() + 1; } - Handle<FixedArray> handler_table = + Handle<ByteArray> handler_table = handler_table_builder()->ToHandlerTable(isolate); return bytecode_array_writer_.ToBytecodeArray( isolate, register_count, parameter_count(), handler_table); @@ -973,8 +973,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEmptyObjectLiteral() { } BytecodeArrayBuilder& BytecodeArrayBuilder::GetTemplateObject( - size_t template_object_description_entry) { - OutputGetTemplateObject(template_object_description_entry); + size_t template_object_description_entry, int feedback_slot) { + OutputGetTemplateObject(template_object_description_entry, feedback_slot); return *this; } @@ -1271,16 +1271,19 @@ 
BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator( return *this; } -BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState( - Register generator) { - OutputRestoreGeneratorState(generator); +BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnGeneratorState( + Register generator, BytecodeJumpTable* jump_table) { + DCHECK_EQ(jump_table->case_value_base(), 0); + BytecodeNode node(CreateSwitchOnGeneratorStateNode( + generator, jump_table->constant_pool_index(), jump_table->size())); + WriteSwitch(&node, jump_table); + LeaveBasicBlock(); return *this; } BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator( - Register generator, Register generator_state, RegisterList registers) { - OutputResumeGenerator(generator, generator_state, registers, - registers.register_count()); + Register generator, RegisterList registers) { + OutputResumeGenerator(generator, registers, registers.register_count()); return *this; } diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h index 021222abe5..05086bf714 100644 --- a/deps/v8/src/interpreter/bytecode-array-builder.h +++ b/deps/v8/src/interpreter/bytecode-array-builder.h @@ -240,7 +240,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { // Gets or creates the template for a TemplateObjectDescription which will // be inserted at constant pool index |template_object_description_entry|. BytecodeArrayBuilder& GetTemplateObject( - size_t template_object_description_entry); + size_t template_object_description_entry, int feedback_slot); // Push the context in accumulator as the new context, and store in register // |context|. @@ -354,6 +354,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { // the key to be deleted and the register contains a reference to the object. BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode); + // JavaScript defines two kinds of 'nil'. + enum NilValue { kNullValue, kUndefinedValue }; + // Tests. 
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg, int feedback_slot); @@ -430,9 +433,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { BytecodeArrayBuilder& SuspendGenerator(Register generator, RegisterList registers, int suspend_id); - BytecodeArrayBuilder& RestoreGeneratorState(Register generator); + BytecodeArrayBuilder& SwitchOnGeneratorState(Register generator, + BytecodeJumpTable* jump_table); BytecodeArrayBuilder& ResumeGenerator(Register generator, - Register generator_state, RegisterList registers); // Exception handling. diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc index 9aea3d83fa..81f49baeea 100644 --- a/deps/v8/src/interpreter/bytecode-array-writer.cc +++ b/deps/v8/src/interpreter/bytecode-array-writer.cc @@ -38,7 +38,7 @@ BytecodeArrayWriter::BytecodeArrayWriter( Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray( Isolate* isolate, int register_count, int parameter_count, - Handle<FixedArray> handler_table) { + Handle<ByteArray> handler_table) { DCHECK_EQ(0, unbound_jumps_); int bytecode_size = static_cast<int>(bytecodes()->size()); @@ -158,6 +158,7 @@ void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) { case Bytecode::kAbort: case Bytecode::kJump: case Bytecode::kJumpConstant: + case Bytecode::kSuspendGenerator: exit_seen_in_block_ = true; break; default: diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h index c53df10129..9700d2c1cf 100644 --- a/deps/v8/src/interpreter/bytecode-array-writer.h +++ b/deps/v8/src/interpreter/bytecode-array-writer.h @@ -43,7 +43,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final { void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value); Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count, int parameter_count, - Handle<FixedArray> handler_table); + Handle<ByteArray> handler_table); private: // Maximum 
sized packed bytecode is comprised of a prefix bytecode, diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc index ee94e7a2e2..997c5a8da8 100644 --- a/deps/v8/src/interpreter/bytecode-generator.cc +++ b/deps/v8/src/interpreter/bytecode-generator.cc @@ -853,8 +853,22 @@ class BytecodeGenerator::IteratorRecord final { Register next_; }; +#ifdef DEBUG + +static bool IsInEagerLiterals( + FunctionLiteral* literal, + const ZoneVector<FunctionLiteral*>& eager_literals) { + for (FunctionLiteral* eager_literal : eager_literals) { + if (literal == eager_literal) return true; + } + return false; +} + +#endif // DEBUG + BytecodeGenerator::BytecodeGenerator( - CompilationInfo* info, const AstStringConstants* ast_string_constants) + CompilationInfo* info, const AstStringConstants* ast_string_constants, + ZoneVector<FunctionLiteral*>* eager_inner_literals) : zone_(info->zone()), builder_(zone(), info->num_parameters_including_this(), info->scope()->num_stack_slots(), info->feedback_vector_spec(), @@ -863,6 +877,7 @@ BytecodeGenerator::BytecodeGenerator( ast_string_constants_(ast_string_constants), closure_scope_(info->scope()), current_scope_(info->scope()), + eager_inner_literals_(eager_inner_literals), feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())), globals_builder_(new (zone()) GlobalDeclarationsBuilder(zone())), block_coverage_builder_(nullptr), @@ -878,7 +893,7 @@ BytecodeGenerator::BytecodeGenerator( execution_result_(nullptr), incoming_new_target_or_generator_(), generator_jump_table_(nullptr), - generator_state_(), + suspend_count_(0), loop_depth_(0), catch_prediction_(HandlerTable::UNCAUGHT) { DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope()); @@ -1091,8 +1106,6 @@ void BytecodeGenerator::GenerateBytecodeBody() { void BytecodeGenerator::AllocateTopLevelRegisters() { if (info()->literal()->CanSuspend()) { - // Allocate a register for generator_state_. 
- generator_state_ = register_allocator()->NewRegister(); // Either directly use generator_object_var or allocate a new register for // the incoming generator object. Variable* generator_object_var = closure_scope()->generator_object_var(); @@ -1115,81 +1128,19 @@ void BytecodeGenerator::AllocateTopLevelRegisters() { } } -void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt, - LoopBuilder* loop_builder) { - VisitIterationHeader(stmt->first_suspend_id(), stmt->suspend_count(), - loop_builder); -} - -void BytecodeGenerator::VisitIterationHeader(int first_suspend_id, - int suspend_count, - LoopBuilder* loop_builder) { - // Recall that suspend_count is always zero inside ordinary (i.e. - // non-generator) functions. - if (suspend_count == 0) { - loop_builder->LoopHeader(); - } else { - loop_builder->LoopHeaderInGenerator(&generator_jump_table_, - first_suspend_id, suspend_count); - - // Perform state dispatch on the generator state, assuming this is a resume. - builder() - ->LoadAccumulatorWithRegister(generator_state_) - .SwitchOnSmiNoFeedback(generator_jump_table_); - - // We fall through when the generator state is not in the jump table. If we - // are not resuming, we want to fall through to the loop body. - // TODO(leszeks): Only generate this test for debug builds, we can skip it - // entirely in release assuming that the generator states is always valid. - BytecodeLabel not_resuming; - builder() - ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)) - .CompareOperation(Token::Value::EQ_STRICT, generator_state_) - .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming); - - // Otherwise this is an error. 
- builder()->Abort(AbortReason::kInvalidJumpTableIndex); - - builder()->Bind(&not_resuming); - } -} - void BytecodeGenerator::BuildGeneratorPrologue() { DCHECK_GT(info()->literal()->suspend_count(), 0); - DCHECK(generator_state_.is_valid()); DCHECK(generator_object().is_valid()); generator_jump_table_ = builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0); - BytecodeLabel regular_call; - builder() - ->LoadAccumulatorWithRegister(generator_object()) - .JumpIfUndefined(&regular_call); - - // This is a resume call. Restore the current context and the registers, - // then perform state dispatch. - { - RegisterAllocationScope register_scope(this); - Register generator_context = register_allocator()->NewRegister(); - builder() - ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object()) - .PushContext(generator_context) - .RestoreGeneratorState(generator_object()) - .StoreAccumulatorInRegister(generator_state_) - .SwitchOnSmiNoFeedback(generator_jump_table_); - } - // We fall through when the generator state is not in the jump table. - // TODO(leszeks): Only generate this for debug builds. - builder()->Abort(AbortReason::kInvalidJumpTableIndex); + // If the generator is not undefined, this is a resume, so perform state + // dispatch. + builder()->SwitchOnGeneratorState(generator_object(), generator_jump_table_); - // This is a regular call. - builder() - ->Bind(&regular_call) - .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)) - .StoreAccumulatorInRegister(generator_state_); - // Now fall through to the ordinary function prologue, after which we will run - // into the generator object creation and other extra code inserted by the - // parser. + // Otherwise, fall-through to the ordinary function prologue, after which we + // will run into the generator object creation and other extra code inserted + // by the parser. 
} void BytecodeGenerator::VisitBlock(Block* stmt) { @@ -1274,6 +1225,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) { FeedbackSlot literal_slot = GetCachedCreateClosureSlot(decl->fun()); globals_builder()->AddFunctionDeclaration(variable->raw_name(), slot, literal_slot, decl->fun()); + AddToEagerLiteralsIfEager(decl->fun()); break; } case VariableLocation::PARAMETER: @@ -1306,6 +1258,8 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) { BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided); break; } + DCHECK_IMPLIES(decl->fun()->ShouldEagerCompile(), + IsInEagerLiterals(decl->fun(), *eager_inner_literals_)); } void BytecodeGenerator::VisitModuleNamespaceImports() { @@ -1505,11 +1459,11 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { if (stmt->cond()->ToBooleanIsFalse()) { VisitIterationBody(stmt, &loop_builder); } else if (stmt->cond()->ToBooleanIsTrue()) { - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); VisitIterationBody(stmt, &loop_builder); loop_builder.JumpToHeader(loop_depth_); } else { - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); VisitIterationBody(stmt, &loop_builder); builder()->SetExpressionAsStatementPosition(stmt->cond()); BytecodeLabels loop_backbranch(zone()); @@ -1528,7 +1482,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) { return; } - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); if (!stmt->cond()->ToBooleanIsTrue()) { builder()->SetExpressionAsStatementPosition(stmt->cond()); BytecodeLabels loop_body(zone()); @@ -1552,7 +1506,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) { return; } - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) { builder()->SetExpressionAsStatementPosition(stmt->cond()); BytecodeLabels loop_body(zone()); @@ -1670,7 +1624,7 
@@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) { // The loop { LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt); - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); builder()->SetExpressionAsStatementPosition(stmt->each()); builder()->ForInContinue(index, cache_length); loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean); @@ -1694,7 +1648,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { VisitForEffect(stmt->assign_iterator()); VisitForEffect(stmt->assign_next()); - VisitIterationHeader(stmt, &loop_builder); + loop_builder.LoopHeader(); builder()->SetExpressionAsStatementPosition(stmt->next_result()); VisitForEffect(stmt->next_result()); TypeHint type_hint = VisitForAccumulatorValue(stmt->result_done()); @@ -1832,6 +1786,14 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { FeedbackSlot slot = GetCachedCreateClosureSlot(expr); builder()->CreateClosure(entry, feedback_index(slot), flags); function_literals_.push_back(std::make_pair(expr, entry)); + AddToEagerLiteralsIfEager(expr); +} + +void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) { + if (eager_inner_literals_ && literal->ShouldEagerCompile()) { + DCHECK(!IsInEagerLiterals(literal, *eager_inner_literals_)); + eager_inner_literals_->push_back(literal); + } } void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) { @@ -1867,6 +1829,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) { for (int i = 0; i < expr->properties()->length(); i++) { ClassLiteral::Property* property = expr->properties()->at(i); if (property->is_computed_name()) { + DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD); Register key = register_allocator()->GrowRegisterList(&args); BuildLoadPropertyKey(property, key); @@ -1884,7 +1847,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) { .Bind(&done); } - if (property->kind() == 
ClassLiteral::Property::FIELD) { + if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) { // Initialize field's name variable with the computed name. DCHECK_NOT_NULL(property->computed_name_var()); builder()->LoadAccumulatorWithRegister(key); @@ -1892,11 +1855,19 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) { HoleCheckMode::kElided); } } - if (property->kind() == ClassLiteral::Property::FIELD) { + + if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) { // We don't compute field's value here, but instead do it in the // initializer function. continue; + } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) { + builder()->CallRuntime(Runtime::kCreatePrivateFieldSymbol); + DCHECK_NOT_NULL(property->private_field_name_var()); + BuildVariableAssignment(property->private_field_name_var(), Token::INIT, + HoleCheckMode::kElided); + continue; } + Register value = register_allocator()->GrowRegisterList(&args); VisitForRegisterValue(property->value(), value); } @@ -1976,12 +1947,18 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement( ClassLiteral::Property* property = expr->fields()->at(i); if (property->is_computed_name()) { + DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD); Variable* var = property->computed_name_var(); DCHECK_NOT_NULL(var); // The computed name is already evaluated and stored in a // variable at class definition time. 
BuildVariableLoad(var, HoleCheckMode::kElided); builder()->StoreAccumulatorInRegister(key); + } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) { + Variable* private_field_name_var = property->private_field_name_var(); + DCHECK_NOT_NULL(private_field_name_var); + BuildVariableLoad(private_field_name_var, HoleCheckMode::kElided); + builder()->StoreAccumulatorInRegister(key); } else { BuildLoadPropertyKey(property, key); } @@ -1989,7 +1966,11 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement( VisitForRegisterValue(property->value(), value); VisitSetHomeObject(value, constructor, property); - builder()->CallRuntime(Runtime::kCreateDataProperty, args); + Runtime::FunctionId function_id = + property->kind() == ClassLiteral::Property::PUBLIC_FIELD + ? Runtime::kCreateDataProperty + : Runtime::kAddPrivateField; + builder()->CallRuntime(function_id, args); } } @@ -2140,7 +2121,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { UNREACHABLE(); case ObjectLiteral::Property::MATERIALIZED_LITERAL: DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value())); - // Fall through. + V8_FALLTHROUGH; case ObjectLiteral::Property::COMPUTED: { // It is safe to use [[Put]] here because the boilerplate already // contains computed properties with an uninitialized value. @@ -2563,7 +2544,7 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) { BuildVariableLoad(var_promise, HoleCheckMode::kElided); builder() ->StoreAccumulatorInRegister(promise) - .CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args) + .CallRuntime(Runtime::kInlineResolvePromise, args) .LoadAccumulatorWithRegister(promise); } @@ -2863,32 +2844,33 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) { VisitAssignment(expr); } -// Suspends the generator to resume at |suspend_id|, with output stored in the -// accumulator. When the generator is resumed, the sent value is loaded in the -// accumulator. 
-void BytecodeGenerator::BuildSuspendPoint(int suspend_id) { +// Suspends the generator to resume at the next suspend_id, with output stored +// in the accumulator. When the generator is resumed, the sent value is loaded +// in the accumulator. +void BytecodeGenerator::BuildSuspendPoint(Expression* suspend_expr) { + const int suspend_id = suspend_count_++; + RegisterList registers = register_allocator()->AllLiveRegisters(); - // Save context, registers, and state. Then return. + // Save context, registers, and state. This bytecode then returns the value + // in the accumulator. + builder()->SetExpressionPosition(suspend_expr); builder()->SuspendGenerator(generator_object(), registers, suspend_id); - builder()->SetReturnPosition(kNoSourcePosition, info()->literal()); - builder()->Return(); // Hard return (ignore any finally blocks). - // Upon resume, we continue here. builder()->Bind(generator_jump_table_, suspend_id); - // Clobbers all registers, updating the state to indicate that we have - // finished resuming and setting the accumulator to the [[input_or_debug_pos]] - // slot of the generator object. - builder()->ResumeGenerator(generator_object(), generator_state_, registers); + // Clobbers all registers and sets the accumulator to the + // [[input_or_debug_pos]] slot of the generator object. 
+ builder()->ResumeGenerator(generator_object(), registers); } void BytecodeGenerator::VisitYield(Yield* expr) { builder()->SetExpressionPosition(expr); VisitForAccumulatorValue(expr->expression()); - if (!expr->IsInitialYield()) { + // If this is not the first yield + if (suspend_count_ > 0) { if (IsAsyncGeneratorFunction(function_kind())) { // AsyncGenerator yields (with the exception of the initial yield) // delegate work to the AsyncGeneratorYield stub, which Awaits the operand @@ -2914,7 +2896,7 @@ void BytecodeGenerator::VisitYield(Yield* expr) { } } - BuildSuspendPoint(expr->suspend_id()); + BuildSuspendPoint(expr); // At this point, the generator has been resumed, with the received value in // the accumulator. @@ -3053,10 +3035,16 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) { // visible to the user, and we therefore neither pass the block coverage // builder nor the expression. // - // YieldStar in AsyncGenerator functions includes 3 suspend points, rather - // than 1. These are documented in the YieldStar AST node. + // In addition to the normal suspend for yield*, a yield* in an async + // generator has 2 additional suspends: + // - One for awaiting the iterator result of closing the generator when + // resumed with a "throw" completion, and a throw method is not + // present on the delegated iterator + // - One for awaiting the iterator result yielded by the delegated + // iterator + LoopBuilder loop(builder(), nullptr, nullptr); - VisitIterationHeader(expr->suspend_id(), expr->suspend_count(), &loop); + loop.LoopHeader(); { BytecodeLabels after_switch(zone()); @@ -3110,7 +3098,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) { // If there is no "throw" method, perform IteratorClose, and finally // throw a TypeError. 
no_throw_method.Bind(builder()); - BuildIteratorClose(iterator, expr->await_iterator_close_suspend_id()); + BuildIteratorClose(iterator, expr); builder()->CallRuntime(Runtime::kThrowThrowMethodMissing); } @@ -3119,7 +3107,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) { if (iterator_type == IteratorType::kAsync) { // Await the result of the method invocation. - BuildAwait(expr->await_delegated_iterator_output_suspend_id()); + BuildAwait(expr); } // Check that output is an object. @@ -3159,7 +3147,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) { .CallRuntime(Runtime::kInlineAsyncGeneratorYield, args); } - BuildSuspendPoint(expr->suspend_id()); + BuildSuspendPoint(expr); builder()->StoreAccumulatorInRegister(input); builder() ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, @@ -3195,7 +3183,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) { builder()->LoadAccumulatorWithRegister(output_value); } -void BytecodeGenerator::BuildAwait(int suspend_id) { +void BytecodeGenerator::BuildAwait(Expression* await_expr) { // Rather than HandlerTable::UNCAUGHT, async functions use // HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are // transformed into promise rejections. This is necessary to prevent emitting @@ -3208,22 +3196,20 @@ void BytecodeGenerator::BuildAwait(int suspend_id) { // Await(operand) and suspend. RegisterAllocationScope register_scope(this); - int await_builtin_context_index; + Runtime::FunctionId id; RegisterList args; if (IsAsyncGeneratorFunction(function_kind())) { - await_builtin_context_index = - catch_prediction() == HandlerTable::ASYNC_AWAIT - ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT - : Context::ASYNC_GENERATOR_AWAIT_CAUGHT; + id = catch_prediction() == HandlerTable::ASYNC_AWAIT + ? 
Runtime::kInlineAsyncGeneratorAwaitUncaught + : Runtime::kInlineAsyncGeneratorAwaitCaught; args = register_allocator()->NewRegisterList(2); builder() ->MoveRegister(generator_object(), args[0]) .StoreAccumulatorInRegister(args[1]); } else { - await_builtin_context_index = - catch_prediction() == HandlerTable::ASYNC_AWAIT - ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX - : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX; + id = catch_prediction() == HandlerTable::ASYNC_AWAIT + ? Runtime::kInlineAsyncFunctionAwaitUncaught + : Runtime::kInlineAsyncFunctionAwaitCaught; args = register_allocator()->NewRegisterList(3); builder() ->MoveRegister(generator_object(), args[0]) @@ -3236,10 +3222,10 @@ void BytecodeGenerator::BuildAwait(int suspend_id) { builder()->StoreAccumulatorInRegister(args[2]); } - builder()->CallJSRuntime(await_builtin_context_index, args); + builder()->CallRuntime(id, args); } - BuildSuspendPoint(suspend_id); + BuildSuspendPoint(await_expr); Register input = register_allocator()->NewRegister(); Register resume_mode = register_allocator()->NewRegister(); @@ -3267,7 +3253,7 @@ void BytecodeGenerator::BuildAwait(int suspend_id) { void BytecodeGenerator::VisitAwait(Await* expr) { builder()->SetExpressionPosition(expr); VisitForAccumulatorValue(expr->expression()); - BuildAwait(expr->suspend_id()); + BuildAwait(expr); BuildIncrementBlockCoverageCounterIfEnabled(expr, SourceRangeKind::kContinuation); } @@ -3914,7 +3900,8 @@ void BytecodeGenerator::VisitNaryOperation(NaryOperation* expr) { } } -void BytecodeGenerator::BuildLiteralCompareNil(Token::Value op, NilValue nil) { +void BytecodeGenerator::BuildLiteralCompareNil( + Token::Value op, BytecodeArrayBuilder::NilValue nil) { if (execution_result()->IsTest()) { TestResultScope* test_result = execution_result()->AsTest(); switch (test_result->fallthrough()) { @@ -3953,11 +3940,11 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) { } else if (expr->IsLiteralCompareUndefined(&sub_expr)) { 
VisitForAccumulatorValue(sub_expr); builder()->SetExpressionPosition(expr); - BuildLiteralCompareNil(expr->op(), kUndefinedValue); + BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kUndefinedValue); } else if (expr->IsLiteralCompareNull(&sub_expr)) { VisitForAccumulatorValue(sub_expr); builder()->SetExpressionPosition(expr); - BuildLiteralCompareNil(expr->op(), kNullValue); + BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kNullValue); } else { Register lhs = VisitForRegisterValue(expr->left()); VisitForAccumulatorValue(expr->right()); @@ -4154,7 +4141,7 @@ void BytecodeGenerator::BuildCallIteratorMethod(Register iterator, } void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator, - int suspend_id) { + Expression* expr) { RegisterAllocationScope register_scope(this); BytecodeLabels done(zone()); BytecodeLabel if_called; @@ -4165,8 +4152,8 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator, builder()->Bind(&if_called); if (iterator.type() == IteratorType::kAsync) { - DCHECK_GE(suspend_id, 0); - BuildAwait(suspend_id); + DCHECK_NOT_NULL(expr); + BuildAwait(expr); } builder()->JumpIfJSReceiver(done.New()); @@ -4190,7 +4177,8 @@ void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) { builder()->SetExpressionPosition(expr); size_t entry = builder()->AllocateDeferredConstantPoolEntry(); template_objects_.push_back(std::make_pair(expr, entry)); - builder()->GetTemplateObject(entry); + FeedbackSlot literal_slot = feedback_spec()->AddLiteralSlot(); + builder()->GetTemplateObject(entry, feedback_index(literal_slot)); } void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) { diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h index f9de9550fe..c96e5e9e83 100644 --- a/deps/v8/src/interpreter/bytecode-generator.h +++ b/deps/v8/src/interpreter/bytecode-generator.h @@ -28,8 +28,9 @@ class BytecodeJumpTable; class BytecodeGenerator final : 
public AstVisitor<BytecodeGenerator> { public: - explicit BytecodeGenerator(CompilationInfo* info, - const AstStringConstants* ast_string_constants); + explicit BytecodeGenerator( + CompilationInfo* info, const AstStringConstants* ast_string_constants, + ZoneVector<FunctionLiteral*>* eager_inner_literals); void GenerateBytecode(uintptr_t stack_limit); Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate, @@ -126,7 +127,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void BuildVariableAssignment( Variable* variable, Token::Value op, HoleCheckMode hole_check_mode, LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal); - void BuildLiteralCompareNil(Token::Value compare_op, NilValue nil); + void BuildLiteralCompareNil(Token::Value compare_op, + BytecodeArrayBuilder::NilValue nil); void BuildReturn(int source_position = kNoSourcePosition); void BuildAsyncReturn(int source_position = kNoSourcePosition); void BuildAsyncGeneratorReturn(); @@ -146,9 +148,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void BuildNewLocalWithContext(Scope* scope); void BuildGeneratorPrologue(); - void BuildSuspendPoint(int suspend_id); + void BuildSuspendPoint(Expression* suspend_expr); - void BuildAwait(int suspend_id); + void BuildAwait(Expression* await_expr); void BuildGetIterator(Expression* iterable, IteratorType hint); @@ -164,7 +166,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { IteratorRecord BuildGetIteratorRecord(Expression* iterable, IteratorType hint); void BuildIteratorNext(const IteratorRecord& iterator, Register next_result); - void BuildIteratorClose(const IteratorRecord& iterator, int suspend_id = -1); + void BuildIteratorClose(const IteratorRecord& iterator, + Expression* expr = nullptr); void BuildCallIteratorMethod(Register iterator, const AstRawString* method, RegisterList receiver_and_args, BytecodeLabel* if_called, @@ -212,11 +215,7 @@ class BytecodeGenerator final 
: public AstVisitor<BytecodeGenerator> { BytecodeLabels* end_labels, int coverage_slot); - // Visit the header/body of a loop iteration. - void VisitIterationHeader(IterationStatement* stmt, - LoopBuilder* loop_builder); - void VisitIterationHeader(int first_suspend_id, int suspend_count, - LoopBuilder* loop_builder); + // Visit the body of a loop iteration. void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder); // Visit a statement and switch scopes, the context is in the accumulator. @@ -263,6 +262,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { Variable* variable); FeedbackSlot GetCachedCreateClosureSlot(FunctionLiteral* literal); + void AddToEagerLiteralsIfEager(FunctionLiteral* literal); + static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) { return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean : ToBooleanMode::kConvertToBoolean; @@ -324,6 +325,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { DeclarationScope* closure_scope_; Scope* current_scope_; + // External vector of literals to be eagerly compiled. 
+ ZoneVector<FunctionLiteral*>* eager_inner_literals_; + FeedbackSlotCache* feedback_slot_cache_; GlobalDeclarationsBuilder* globals_builder_; @@ -344,7 +348,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { Register incoming_new_target_or_generator_; BytecodeJumpTable* generator_jump_table_; - Register generator_state_; + int suspend_count_; int loop_depth_; HandlerTable::CatchPrediction catch_prediction_; diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc index 61173a8341..88cdae6ce5 100644 --- a/deps/v8/src/interpreter/bytecodes.cc +++ b/deps/v8/src/interpreter/bytecodes.cc @@ -200,6 +200,17 @@ bool Bytecodes::IsRegisterOperandType(OperandType operand_type) { return false; } +// static +bool Bytecodes::IsRegisterListOperandType(OperandType operand_type) { + switch (operand_type) { + case OperandType::kRegList: + case OperandType::kRegOutList: + return true; + default: + return false; + } +} + bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) { if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true; switch (bytecode) { diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h index ce01566d52..293c0562e9 100644 --- a/deps/v8/src/interpreter/bytecodes.h +++ b/deps/v8/src/interpreter/bytecodes.h @@ -233,7 +233,8 @@ namespace interpreter { V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \ \ /* Tagged templates */ \ - V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx) \ + V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx, \ + OperandType::kIdx) \ \ /* Closure allocation */ \ V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \ @@ -314,11 +315,12 @@ namespace interpreter { V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \ \ /* Generators */ \ - V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \ - V(SuspendGenerator, AccumulatorUse::kNone, OperandType::kReg, \ + 
V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \ + OperandType::kIdx, OperandType::kUImm) \ + V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg, \ OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \ V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \ - OperandType::kRegOut, OperandType::kRegOutList, OperandType::kRegCount) \ + OperandType::kRegOutList, OperandType::kRegCount) \ \ /* Debugger */ \ V(Debugger, AccumulatorUse::kNone) \ @@ -432,6 +434,10 @@ namespace interpreter { JUMP_FORWARD_BYTECODE_LIST(V) \ V(JumpLoop) +#define RETURN_BYTECODE_LIST(V) \ + V(Return) \ + V(SuspendGenerator) + // Enumeration of interpreter bytecodes. enum class Bytecode : uint8_t { #define DECLARE_BYTECODE(Name, ...) k##Name, @@ -613,11 +619,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic { bytecode <= Bytecode::kJumpIfJSReceiver; } - // Returns true if the bytecode is a conditional jump, a jump, or a return. - static constexpr bool IsJumpOrReturn(Bytecode bytecode) { - return bytecode == Bytecode::kReturn || IsJump(bytecode); - } - // Return true if |bytecode| is a jump without effects, // e.g. any jump excluding those that include type coercion like // JumpIfTrueToBoolean. @@ -627,7 +628,8 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic { // Returns true if the bytecode is a switch. static constexpr bool IsSwitch(Bytecode bytecode) { - return bytecode == Bytecode::kSwitchOnSmiNoFeedback; + return bytecode == Bytecode::kSwitchOnSmiNoFeedback || + bytecode == Bytecode::kSwitchOnGeneratorState; } // Returns true if |bytecode| has no effects. These bytecodes only manipulate @@ -681,9 +683,16 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic { return true; } + // Returns true if the bytecode returns. 
+ static constexpr bool Returns(Bytecode bytecode) { +#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME + return false RETURN_BYTECODE_LIST(OR_BYTECODE); +#undef OR_BYTECODE + } + // Returns the number of values which |bytecode| returns. static constexpr size_t ReturnCount(Bytecode bytecode) { - return bytecode == Bytecode::kReturn ? 1 : 0; + return Returns(bytecode) ? 1 : 0; } // Returns the number of operands expected by |bytecode|. @@ -812,6 +821,9 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic { // Returns true if |operand_type| represents a register used as an output. static bool IsRegisterOutputOperandType(OperandType operand_type); + // Returns true if |operand_type| represents a register list operand. + static bool IsRegisterListOperandType(OperandType operand_type); + // Returns true if the handler for |bytecode| should look ahead and inline a // dispatch to a Star bytecode. static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale); diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc index ea316f286f..bada935e4a 100644 --- a/deps/v8/src/interpreter/control-flow-builders.cc +++ b/deps/v8/src/interpreter/control-flow-builders.cc @@ -47,10 +47,6 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) { LoopBuilder::~LoopBuilder() { DCHECK(continue_labels_.empty() || continue_labels_.is_bound()); - // Restore the parent jump table. - if (generator_jump_table_location_ != nullptr) { - *generator_jump_table_location_ = parent_generator_jump_table_; - } } void LoopBuilder::LoopHeader() { @@ -62,26 +58,6 @@ void LoopBuilder::LoopHeader() { builder()->Bind(&loop_header_); } -void LoopBuilder::LoopHeaderInGenerator( - BytecodeJumpTable** generator_jump_table, int first_resume_id, - int resume_count) { - // Bind all the resume points that are inside the loop to be at the loop - // header. 
- for (int id = first_resume_id; id < first_resume_id + resume_count; ++id) { - builder()->Bind(*generator_jump_table, id); - } - - // Create the loop header. - LoopHeader(); - - // Create a new jump table for after the loop header for only these - // resume points. - generator_jump_table_location_ = generator_jump_table; - parent_generator_jump_table_ = *generator_jump_table; - *generator_jump_table = - builder()->AllocateJumpTable(resume_count, first_resume_id); -} - void LoopBuilder::LoopBody() { if (block_coverage_builder_ != nullptr) { block_coverage_builder_->IncrementBlockCounter(block_coverage_body_slot_); diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h index 4a81b1f205..405e81bc76 100644 --- a/deps/v8/src/interpreter/control-flow-builders.h +++ b/deps/v8/src/interpreter/control-flow-builders.h @@ -105,9 +105,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder { LoopBuilder(BytecodeArrayBuilder* builder, BlockCoverageBuilder* block_coverage_builder, AstNode* node) : BreakableControlFlowBuilder(builder, block_coverage_builder, node), - continue_labels_(builder->zone()), - generator_jump_table_location_(nullptr), - parent_generator_jump_table_(nullptr) { + continue_labels_(builder->zone()) { if (block_coverage_builder_ != nullptr) { set_needs_continuation_counter(); block_coverage_body_slot_ = @@ -118,8 +116,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder { ~LoopBuilder(); void LoopHeader(); - void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table, - int first_resume_id, int resume_count); void LoopBody(); void JumpToHeader(int loop_depth); void BindContinueTarget(); @@ -138,13 +134,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder { // jumps from checking the loop condition to the header for do-while loops. 
BytecodeLabels continue_labels_; - // While we're in the loop, we want to have a different jump table for - // generator switch statements. We restore it at the end of the loop. - // TODO(leszeks): Storing a pointer to the BytecodeGenerator's jump table - // field is ugly, figure out a better way to do this. - BytecodeJumpTable** generator_jump_table_location_; - BytecodeJumpTable* parent_generator_jump_table_; - int block_coverage_body_slot_; }; diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc index 4b6c44b95d..93db1e969a 100644 --- a/deps/v8/src/interpreter/handler-table-builder.cc +++ b/deps/v8/src/interpreter/handler-table-builder.cc @@ -15,20 +15,20 @@ namespace interpreter { HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {} -Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) { +Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) { int handler_table_size = static_cast<int>(entries_.size()); - Handle<HandlerTable> table = - Handle<HandlerTable>::cast(isolate->factory()->NewFixedArray( - HandlerTable::LengthForRange(handler_table_size), TENURED)); + Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray( + HandlerTable::LengthForRange(handler_table_size), TENURED); + HandlerTable table(*table_byte_array); for (int i = 0; i < handler_table_size; ++i) { Entry& entry = entries_[i]; HandlerTable::CatchPrediction pred = entry.catch_prediction_; - table->SetRangeStart(i, static_cast<int>(entry.offset_start)); - table->SetRangeEnd(i, static_cast<int>(entry.offset_end)); - table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred); - table->SetRangeData(i, entry.context.index()); + table.SetRangeStart(i, static_cast<int>(entry.offset_start)); + table.SetRangeEnd(i, static_cast<int>(entry.offset_end)); + table.SetRangeHandler(i, static_cast<int>(entry.offset_target), pred); + table.SetRangeData(i, 
entry.context.index()); } - return table; + return table_byte_array; } diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h index 381606f98b..021fefad29 100644 --- a/deps/v8/src/interpreter/handler-table-builder.h +++ b/deps/v8/src/interpreter/handler-table-builder.h @@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED { // Builds the actual handler table by copying the current values into a heap // object. Any further mutations to the builder won't be reflected. - Handle<HandlerTable> ToHandlerTable(Isolate* isolate); + Handle<ByteArray> ToHandlerTable(Isolate* isolate); // Creates a new handler table entry and returns a {hander_id} identifying the // entry, so that it can be referenced by below setter functions. diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc index 846b69281e..b2c4ba2309 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.cc +++ b/deps/v8/src/interpreter/interpreter-assembler.cc @@ -48,6 +48,8 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, made_call_(false), reloaded_frame_ptr_(false), bytecode_array_valid_(true), + speculation_poison_(FLAG_untrusted_code_mitigations ? SpeculationPoison() + : nullptr), disable_stack_check_across_call_(false), stack_pointer_before_call_(nullptr) { #ifdef V8_TRACE_IGNITION @@ -59,7 +61,7 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, // Save the bytecode offset immediately if bytecode will make a call along the // critical path, or it is a return bytecode. 
if (Bytecodes::MakesCallAlongCriticalPath(bytecode) || - bytecode_ == Bytecode::kReturn) { + Bytecodes::Returns(bytecode)) { SaveBytecodeOffset(); } } @@ -72,6 +74,24 @@ InterpreterAssembler::~InterpreterAssembler() { UnregisterCallGenerationCallbacks(); } +Node* InterpreterAssembler::PoisonOnSpeculationTagged(Node* value) { + if (speculation_poison_ == nullptr) return value; + return BitcastWordToTagged( + WordAnd(speculation_poison_, BitcastTaggedToWord(value))); +} + +Node* InterpreterAssembler::PoisonOnSpeculationWord(Node* value) { + if (speculation_poison_ == nullptr) return value; + return WordAnd(speculation_poison_, value); +} + +Node* InterpreterAssembler::PoisonOnSpeculationInt32(Node* value) { + if (speculation_poison_ == nullptr) return value; + Node* truncated_speculation_poison = + Is64() ? TruncateInt64ToInt32(speculation_poison_) : speculation_poison_; + return Word32And(truncated_speculation_poison, value); +} + Node* InterpreterAssembler::GetInterpretedFramePointer() { if (!interpreted_frame_pointer_.IsBound()) { interpreted_frame_pointer_.Bind(LoadParentFramePointer()); @@ -139,7 +159,7 @@ Node* InterpreterAssembler::GetAccumulatorUnchecked() { Node* InterpreterAssembler::GetAccumulator() { DCHECK(Bytecodes::ReadsAccumulator(bytecode_)); accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead; - return GetAccumulatorUnchecked(); + return PoisonOnSpeculationTagged(GetAccumulatorUnchecked()); } void InterpreterAssembler::SetAccumulator(Node* value) { @@ -222,22 +242,27 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context, } Node* InterpreterAssembler::RegisterLocation(Node* reg_index) { - return IntPtrAdd(GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index)); + return PoisonOnSpeculationWord( + IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))); +} + +Node* InterpreterAssembler::RegisterLocation(Register reg) { + return RegisterLocation(IntPtrConstant(reg.ToOperand())); } Node* 
InterpreterAssembler::RegisterFrameOffset(Node* index) { return TimesPointerSize(index); } -Node* InterpreterAssembler::LoadRegister(Register reg) { - return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(), - IntPtrConstant(reg.ToOperand() << kPointerSizeLog2)); +Node* InterpreterAssembler::LoadRegister(Node* reg_index) { + Node* value = Load(MachineType::AnyTagged(), GetInterpretedFramePointer(), + RegisterFrameOffset(reg_index)); + return PoisonOnSpeculationTagged(value); } -Node* InterpreterAssembler::LoadRegister(Node* reg_index) { +Node* InterpreterAssembler::LoadRegister(Register reg) { return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index)); + IntPtrConstant(reg.ToOperand() << kPointerSizeLog2)); } Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) { @@ -245,22 +270,92 @@ Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) { << kPointerSizeLog2); } -Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) { - return StoreNoWriteBarrier( +Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) { + return LoadRegister(BytecodeOperandRegUnpoisoned(operand_index)); +} + +std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex( + int operand_index) { + DCHECK_EQ(OperandType::kRegPair, + Bytecodes::GetOperandType(bytecode_, operand_index)); + Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index); + Node* second_reg_index = NextRegister(first_reg_index); + return std::make_pair(LoadRegister(first_reg_index), + LoadRegister(second_reg_index)); +} + +InterpreterAssembler::RegListNodePair +InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { + DCHECK(Bytecodes::IsRegisterListOperandType( + Bytecodes::GetOperandType(bytecode_, operand_index))); + DCHECK_EQ(OperandType::kRegCount, + Bytecodes::GetOperandType(bytecode_, operand_index + 1)); + Node* base_reg = + 
RegisterLocation(BytecodeOperandRegUnpoisoned(operand_index)); + Node* reg_count = BytecodeOperandCount(operand_index + 1); + return RegListNodePair(base_reg, reg_count); +} + +Node* InterpreterAssembler::LoadRegisterFromRegisterList( + const RegListNodePair& reg_list, int index) { + Node* location = RegisterLocationInRegisterList(reg_list, index); + // Location is already poisoned on speculation, so no need to poison here. + return Load(MachineType::AnyTagged(), location); +} + +Node* InterpreterAssembler::RegisterLocationInRegisterList( + const RegListNodePair& reg_list, int index) { + CSA_ASSERT(this, + Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index))); + Node* offset = RegisterFrameOffset(IntPtrConstant(index)); + // Register indexes are negative, so subtract index from base location to get + // location. + return IntPtrSub(reg_list.base_reg_location(), offset); +} + +void InterpreterAssembler::StoreRegister(Node* value, Register reg) { + StoreNoWriteBarrier( MachineRepresentation::kTagged, GetInterpretedFramePointer(), IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value); } -Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { - return StoreNoWriteBarrier(MachineRepresentation::kTagged, - GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index), value); +void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { + StoreNoWriteBarrier(MachineRepresentation::kTagged, + GetInterpretedFramePointer(), + RegisterFrameOffset(reg_index), value); } -Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value, - Register reg) { +void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) { int offset = reg.ToOperand() << kPointerSizeLog2; - return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value); + StoreAndTagSmi(GetInterpretedFramePointer(), offset, value); +} + +void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value, + int operand_index) { + StoreRegister(value, 
BytecodeOperandRegUnpoisoned(operand_index)); +} + +void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1, + Node* value2, + int operand_index) { + DCHECK_EQ(OperandType::kRegOutPair, + Bytecodes::GetOperandType(bytecode_, operand_index)); + Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index); + StoreRegister(value1, first_reg_index); + Node* second_reg_index = NextRegister(first_reg_index); + StoreRegister(value2, second_reg_index); +} + +void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( + Node* value1, Node* value2, Node* value3, int operand_index) { + DCHECK_EQ(OperandType::kRegOutTriple, + Bytecodes::GetOperandType(bytecode_, operand_index)); + Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index); + StoreRegister(value1, first_reg_index); + Node* second_reg_index = NextRegister(first_reg_index); + StoreRegister(value2, second_reg_index); + Node* third_reg_index = NextRegister(second_reg_index); + StoreRegister(value3, third_reg_index); } Node* InterpreterAssembler::NextRegister(Node* reg_index) { @@ -273,7 +368,8 @@ Node* InterpreterAssembler::OperandOffset(int operand_index) { Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale())); } -Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandUnsignedByteUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -282,7 +378,8 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) { IntPtrAdd(BytecodeOffset(), operand_offset)); } -Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandSignedByteUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, 
Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -291,7 +388,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) { IntPtrAdd(BytecodeOffset(), operand_offset)); } -compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned( +Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned( int relative_offset, MachineType result_type) { static const int kMaxCount = 4; DCHECK(!TargetSupportsUnalignedAccess()); @@ -324,7 +421,7 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned( // Read the most signicant bytecode into bytes[0] and then in order // down to least significant in bytes[count - 1]. DCHECK_LE(count, kMaxCount); - compiler::Node* bytes[kMaxCount]; + Node* bytes[kMaxCount]; for (int i = 0; i < count; i++) { MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8(); Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep); @@ -342,7 +439,8 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned( return result; } -Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandUnsignedShortUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, @@ -353,11 +451,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) { return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()); + return BytecodeOperandReadUnalignedUnpoisoned(operand_offset, + MachineType::Uint16()); } } -Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandSignedShortUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, @@ 
-368,11 +468,13 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) { return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()); + return BytecodeOperandReadUnalignedUnpoisoned(operand_offset, + MachineType::Int16()); } } -Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandUnsignedQuadUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -382,11 +484,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) { return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()); + return BytecodeOperandReadUnalignedUnpoisoned(operand_offset, + MachineType::Uint32()); } } -Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandSignedQuadUnpoisoned( + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -396,44 +500,57 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) { return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()); + return BytecodeOperandReadUnalignedUnpoisoned(operand_offset, + MachineType::Int32()); } } -Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index, - OperandSize operand_size) { +Node* 
InterpreterAssembler::BytecodeSignedOperandUnpoisoned( + int operand_index, OperandSize operand_size) { DCHECK(!Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); switch (operand_size) { case OperandSize::kByte: - return BytecodeOperandSignedByte(operand_index); + return BytecodeOperandSignedByteUnpoisoned(operand_index); case OperandSize::kShort: - return BytecodeOperandSignedShort(operand_index); + return BytecodeOperandSignedShortUnpoisoned(operand_index); case OperandSize::kQuad: - return BytecodeOperandSignedQuad(operand_index); + return BytecodeOperandSignedQuadUnpoisoned(operand_index); case OperandSize::kNone: UNREACHABLE(); } return nullptr; } -Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index, - OperandSize operand_size) { +Node* InterpreterAssembler::BytecodeUnsignedOperandUnpoisoned( + int operand_index, OperandSize operand_size) { DCHECK(Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); switch (operand_size) { case OperandSize::kByte: - return BytecodeOperandUnsignedByte(operand_index); + return BytecodeOperandUnsignedByteUnpoisoned(operand_index); case OperandSize::kShort: - return BytecodeOperandUnsignedShort(operand_index); + return BytecodeOperandUnsignedShortUnpoisoned(operand_index); case OperandSize::kQuad: - return BytecodeOperandUnsignedQuad(operand_index); + return BytecodeOperandUnsignedQuadUnpoisoned(operand_index); case OperandSize::kNone: UNREACHABLE(); } return nullptr; } +Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index, + OperandSize operand_size) { + return PoisonOnSpeculationInt32( + BytecodeSignedOperandUnpoisoned(operand_index, operand_size)); +} + +Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index, + OperandSize operand_size) { + return PoisonOnSpeculationInt32( + BytecodeUnsignedOperandUnpoisoned(operand_index, operand_size)); +} + Node* InterpreterAssembler::BytecodeOperandCount(int 
operand_index) { DCHECK_EQ(OperandType::kRegCount, Bytecodes::GetOperandType(bytecode_, operand_index)); @@ -464,7 +581,7 @@ Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) { } Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { - return SmiFromWord32(BytecodeOperandUImm(operand_index)); + return SmiFromInt32(BytecodeOperandUImm(operand_index)); } Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) { @@ -480,7 +597,7 @@ Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) { } Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { - return SmiFromWord32(BytecodeOperandImm(operand_index)); + return SmiFromInt32(BytecodeOperandImm(operand_index)); } Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) { @@ -499,13 +616,23 @@ Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { return SmiTag(BytecodeOperandIdx(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) { +Node* InterpreterAssembler::BytecodeOperandConstantPoolIdxUnpoisoned( + int operand_index) { + DCHECK_EQ(OperandType::kIdx, + Bytecodes::GetOperandType(bytecode_, operand_index)); + OperandSize operand_size = + Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); + return ChangeUint32ToWord( + BytecodeUnsignedOperand(operand_index, operand_size)); +} + +Node* InterpreterAssembler::BytecodeOperandRegUnpoisoned(int operand_index) { DCHECK(Bytecodes::IsRegisterOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeInt32ToIntPtr( - BytecodeSignedOperand(operand_index, operand_size)); + BytecodeSignedOperandUnpoisoned(operand_index, operand_size)); } Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) { @@ -539,18 +666,27 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) 
{ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset); - return LoadFixedArrayElement(constant_pool, index); + return PoisonOnSpeculationTagged(LoadFixedArrayElement(constant_pool, index)); } Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) { return SmiUntag(LoadConstantPoolEntry(index)); } +Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( + int operand_index) { + Node* index = BytecodeOperandConstantPoolIdxUnpoisoned(operand_index); + return LoadConstantPoolEntry(index); +} + +Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex( + int operand_index) { + return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index)); +} + Node* InterpreterAssembler::LoadFeedbackVector() { Node* function = LoadRegister(Register::function_closure()); - Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset); - Node* vector = LoadObjectField(cell, Cell::kValueOffset); - return vector; + return CodeStubAssembler::LoadFeedbackVector(function); } void InterpreterAssembler::CallPrologue() { @@ -586,11 +722,11 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector, Comment("increment call count"); Node* call_count = LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize); - // The lowest {CallICNexus::CallCountField::kShift} bits of the call + // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call // count are used as flags. To increment the call count by 1 we hence - // have to increment by 1 << {CallICNexus::CallCountField::kShift}. - Node* new_count = - SmiAdd(call_count, SmiConstant(1 << CallICNexus::CallCountField::kShift)); + // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. + Node* new_count = SmiAdd( + call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); // Count is Smi, so we don't need a write barrier. 
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, SKIP_WRITE_BARRIER, kPointerSize); @@ -707,18 +843,30 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context, } void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* first_arg, Node* arg_count, + Node* function, Node* context, const RegListNodePair& args, ConvertReceiverMode receiver_mode) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) || bytecode_ == Bytecode::kInvokeIntrinsic); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode); + + Node* args_count; + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + // The receiver is implied, so it is not in the argument list. + args_count = args.reg_count(); + } else { + // Subtract the receiver from the argument count. + Node* receiver_count = Int32Constant(1); + args_count = Int32Sub(args.reg_count(), receiver_count); + } + Callable callable = CodeFactory::InterpreterPushArgsThenCall( isolate(), receiver_mode, InterpreterPushArgsMode::kOther); Node* code_target = HeapConstant(callable.code()); TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, - arg_count, first_arg, function); + args_count, args.base_reg_location(), + function); // TailCallStubThenDispatch updates accumulator with result. 
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } @@ -764,8 +912,8 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( ConvertReceiverMode receiver_mode, Node*, Node*, Node*); void InterpreterAssembler::CallJSWithSpreadAndDispatch( - Node* function, Node* context, Node* first_arg, Node* arg_count, - Node* slot_id, Node* feedback_vector) { + Node* function, Node* context, const RegListNodePair& args, Node* slot_id, + Node* feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny); CollectCallFeedback(function, context, feedback_vector, slot_id); @@ -775,16 +923,19 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch( InterpreterPushArgsMode::kWithFinalSpread); Node* code_target = HeapConstant(callable.code()); + Node* receiver_count = Int32Constant(1); + Node* args_count = Int32Sub(args.reg_count(), receiver_count); TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, - arg_count, first_arg, function); + args_count, args.base_reg_location(), + function); // TailCallStubThenDispatch updates accumulator with result. 
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } Node* InterpreterAssembler::Construct(Node* target, Node* context, - Node* new_target, Node* first_arg, - Node* arg_count, Node* slot_id, - Node* feedback_vector) { + Node* new_target, + const RegListNodePair& args, + Node* slot_id, Node* feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); VARIABLE(var_result, MachineRepresentation::kTagged); VARIABLE(var_site, MachineRepresentation::kTagged); @@ -937,8 +1088,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context, isolate(), InterpreterPushArgsMode::kJSFunction); Node* code_target = HeapConstant(callable.code()); var_result.Bind(CallStub(callable.descriptor(), code_target, context, - arg_count, new_target, target, var_site.value(), - first_arg)); + args.reg_count(), new_target, target, + var_site.value(), args.base_reg_location())); Goto(&return_result); } @@ -950,8 +1101,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context, isolate(), InterpreterPushArgsMode::kOther); Node* code_target = HeapConstant(callable.code()); var_result.Bind(CallStub(callable.descriptor(), code_target, context, - arg_count, new_target, target, UndefinedConstant(), - first_arg)); + args.reg_count(), new_target, target, + UndefinedConstant(), args.base_reg_location())); Goto(&return_result); } @@ -961,8 +1112,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context, Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, Node* new_target, - Node* first_arg, - Node* arg_count, Node* slot_id, + const RegListNodePair& args, + Node* slot_id, Node* feedback_vector) { // TODO(bmeurer): Unify this with the Construct bytecode feedback // above once we have a way to pass the AllocationSite to the Array @@ -1075,12 +1226,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), 
InterpreterPushArgsMode::kWithFinalSpread); Node* code_target = HeapConstant(callable.code()); - return CallStub(callable.descriptor(), code_target, context, arg_count, - new_target, target, UndefinedConstant(), first_arg); + return CallStub(callable.descriptor(), code_target, context, args.reg_count(), + new_target, target, UndefinedConstant(), + args.base_reg_location()); } Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, - Node* first_arg, Node* arg_count, + const RegListNodePair& args, int result_size) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallRuntime(bytecode_)); @@ -1099,7 +1251,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, IntPtrConstant(offsetof(Runtime::Function, entry))); return CallStubR(callable.descriptor(), result_size, code_target, context, - arg_count, first_arg, function_entry); + args.reg_count(), args.base_reg_location(), function_entry); } void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { @@ -1132,7 +1284,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { BIND(&interrupt_check); { CallRuntime(Runtime::kInterrupt, GetContext()); - new_budget.Bind(Int32Constant(Interpreter::kInterruptBudget)); + new_budget.Bind(Int32Constant(Interpreter::InterruptBudget())); Goto(&ok); } @@ -1169,7 +1321,7 @@ Node* InterpreterAssembler::Advance(Node* delta, bool backward) { Node* InterpreterAssembler::Jump(Node* delta, bool backward) { DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_)); - UpdateInterruptBudget(TruncateWordToWord32(delta), backward); + UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward); Node* new_bytecode_offset = Advance(delta, backward); Node* target_bytecode = LoadBytecode(new_bytecode_offset); return DispatchToBytecode(target_bytecode, new_bytecode_offset); @@ -1200,7 +1352,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs, 
JumpConditional(WordNotEqual(lhs, rhs), delta); } -Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) { +Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) { Node* bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset); return ChangeUint32ToWord(bytecode); @@ -1236,7 +1388,7 @@ void InterpreterAssembler::InlineStar() { #ifdef V8_TRACE_IGNITION TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry); #endif - StoreRegister(GetAccumulator(), BytecodeOperandReg(0)); + StoreRegister(GetAccumulator(), BytecodeOperandRegUnpoisoned(0)); DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_)); @@ -1267,24 +1419,29 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode, Load(MachineType::Pointer(), DispatchTableRawPointer(), TimesPointerSize(target_bytecode)); - return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset); + return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset, + target_bytecode); } Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler, - Node* bytecode_offset) { + Node* bytecode_offset, + Node* target_bytecode) { // TODO(ishell): Add CSA::CodeEntryPoint(code). Node* handler_entry = IntPtrAdd(BitcastTaggedToWord(handler), IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); - return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset); + return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset, + target_bytecode); } Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry( - Node* handler_entry, Node* bytecode_offset) { + Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) { InterpreterDispatchDescriptor descriptor(isolate()); + // Propagate speculation poisoning. 
+ Node* poisoned_handler_entry = PoisonOnSpeculationWord(handler_entry); return TailCallBytecodeDispatch( - descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset, - BytecodeArrayTaggedPointer(), DispatchTableRawPointer()); + descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(), + bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer()); } void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { @@ -1319,7 +1476,8 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { Load(MachineType::Pointer(), DispatchTableRawPointer(), TimesPointerSize(target_index)); - DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset); + DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset, + next_bytecode); } void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { @@ -1342,7 +1500,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { // of the first bytecode. const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag; - Node* profiling_weight = Int32Sub(TruncateWordToWord32(BytecodeOffset()), + Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), Int32Constant(kFirstBytecodeOffset)); UpdateInterruptBudget(profiling_weight, true); } @@ -1451,9 +1609,12 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file, BIND(&ok); } -Node* InterpreterAssembler::ExportRegisterFile(Node* array, - Node* register_count) { +Node* InterpreterAssembler::ExportRegisterFile( + Node* array, const RegListNodePair& registers) { + Node* register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { + CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), + RegisterLocation(Register(0)))); AbortIfRegisterCountInvalid(array, register_count); } @@ -1483,9 +1644,12 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array, return array; } -Node* InterpreterAssembler::ImportRegisterFile(Node* array, - Node* 
register_count) { +Node* InterpreterAssembler::ImportRegisterFile( + Node* array, const RegListNodePair& registers) { + Node* register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { + CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), + RegisterLocation(Register(0)))); AbortIfRegisterCountInvalid(array, register_count); } @@ -1587,8 +1751,7 @@ void InterpreterAssembler::DeserializeLazyAndDispatch() { Node* target_handler = CallRuntime(Runtime::kInterpreterDeserializeLazy, context, SmiTag(bytecode), SmiConstant(operand_scale())); - - DispatchToBytecodeHandler(target_handler, bytecode_offset); + DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode); } } // namespace interpreter diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h index 63d1709145..cb622d0b2d 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.h +++ b/deps/v8/src/interpreter/interpreter-assembler.h @@ -56,9 +56,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Returns the smi immediate for bytecode operand |operand_index| in the // current bytecode. compiler::Node* BytecodeOperandImmSmi(int operand_index); - // Returns the word-size sign-extended register index for bytecode operand - // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandReg(int operand_index); // Returns the 32-bit unsigned runtime id immediate for bytecode operand // |operand_index| in the current bytecode. compiler::Node* BytecodeOperandRuntimeId(int operand_index); @@ -86,31 +83,58 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { void GotoIfHasContextExtensionUpToDepth(compiler::Node* context, compiler::Node* depth, Label* target); + // A RegListNodePair provides an abstraction over lists of registers. 
+ class RegListNodePair { + public: + RegListNodePair(Node* base_reg_location, Node* reg_count) + : base_reg_location_(base_reg_location), reg_count_(reg_count) {} + + compiler::Node* reg_count() const { return reg_count_; } + compiler::Node* base_reg_location() const { return base_reg_location_; } + + private: + compiler::Node* base_reg_location_; + compiler::Node* reg_count_; + }; + // Backup/restore register file to/from a fixed array of the correct length. compiler::Node* ExportRegisterFile(compiler::Node* array, - compiler::Node* register_count); + const RegListNodePair& registers); compiler::Node* ImportRegisterFile(compiler::Node* array, - compiler::Node* register_count); + const RegListNodePair& registers); // Loads from and stores to the interpreter register file. compiler::Node* LoadRegister(Register reg); - compiler::Node* LoadRegister(compiler::Node* reg_index); compiler::Node* LoadAndUntagRegister(Register reg); - compiler::Node* StoreRegister(compiler::Node* value, Register reg); - compiler::Node* StoreRegister(compiler::Node* value, - compiler::Node* reg_index); - compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg); - - // Returns the next consecutive register. - compiler::Node* NextRegister(compiler::Node* reg_index); - - // Returns the location in memory of the register |reg_index| in the - // interpreter register file. 
- compiler::Node* RegisterLocation(compiler::Node* reg_index); - + compiler::Node* LoadRegisterAtOperandIndex(int operand_index); + std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex( + int operand_index); + void StoreRegister(compiler::Node* value, Register reg); + void StoreAndTagRegister(compiler::Node* value, Register reg); + void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index); + void StoreRegisterPairAtOperandIndex(compiler::Node* value1, + compiler::Node* value2, + int operand_index); + void StoreRegisterTripleAtOperandIndex(compiler::Node* value1, + compiler::Node* value2, + compiler::Node* value3, + int operand_index); + + RegListNodePair GetRegisterListAtOperandIndex(int operand_index); + Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list, + int index); + Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list, + int index); + + // Load constant at the index specified in operand |operand_index| from the + // constant pool. + compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index); + // Load and untag constant at the index specified in operand |operand_index| + // from the constant pool. + compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex( + int operand_index); // Load constant at |index| in the constant pool. compiler::Node* LoadConstantPoolEntry(compiler::Node* index); - // Load and untag constant at |index| in the constant pool. compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index); @@ -135,12 +159,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { compiler::Node* feedback_vector, compiler::Node* slot_id); - // Call JSFunction or Callable |function| with |arg_count| arguments (not - // including receiver) and the first argument located at |first_arg|, possibly + // Call JSFunction or Callable |function| with |args| arguments, possibly // including the receiver depending on |receiver_mode|. 
After the call returns // directly dispatches to the next bytecode. void CallJSAndDispatch(compiler::Node* function, compiler::Node* context, - compiler::Node* first_arg, compiler::Node* arg_count, + const RegListNodePair& args, ConvertReceiverMode receiver_mode); // Call JSFunction or Callable |function| with |arg_count| arguments (not @@ -151,46 +174,41 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { void CallJSAndDispatch(Node* function, Node* context, Node* arg_count, ConvertReceiverMode receiver_mode, TArgs... args); - // Call JSFunction or Callable |function| with |arg_count| - // arguments (not including receiver) and the first argument - // located at |first_arg|, and the final argument being spread. After the call - // returns directly dispatches to the next bytecode. + // Call JSFunction or Callable |function| with |args| + // arguments (not including receiver), and the final argument being spread. + // After the call returns directly dispatches to the next bytecode. void CallJSWithSpreadAndDispatch(compiler::Node* function, compiler::Node* context, - compiler::Node* first_arg, - compiler::Node* arg_count, + const RegListNodePair& args, compiler::Node* slot_id, compiler::Node* feedback_vector); - // Call constructor |target| with |arg_count| arguments (not - // including receiver) and the first argument located at - // |first_arg|. The |new_target| is the same as the - // |target| for the new keyword, but differs for the super - // keyword. + // Call constructor |target| with |args| arguments (not including receiver). + // The |new_target| is the same as the |target| for the new keyword, but + // differs for the super keyword. 
compiler::Node* Construct(compiler::Node* target, compiler::Node* context, compiler::Node* new_target, - compiler::Node* first_arg, - compiler::Node* arg_count, compiler::Node* slot_id, + const RegListNodePair& args, + compiler::Node* slot_id, compiler::Node* feedback_vector); - // Call constructor |target| with |arg_count| arguments (not including - // receiver) and the first argument located at |first_arg|. The last argument - // is always a spread. The |new_target| is the same as the |target| for - // the new keyword, but differs for the super keyword. + // Call constructor |target| with |args| arguments (not including + // receiver). The last argument is always a spread. The |new_target| is the + // same as the |target| for the new keyword, but differs for the super + // keyword. compiler::Node* ConstructWithSpread(compiler::Node* target, compiler::Node* context, compiler::Node* new_target, - compiler::Node* first_arg, - compiler::Node* arg_count, + const RegListNodePair& args, compiler::Node* slot_id, compiler::Node* feedback_vector); - // Call runtime function with |arg_count| arguments and the first argument - // located at |first_arg|. + // Call runtime function with |args| arguments which will return |return_size| + // number of values. compiler::Node* CallRuntimeN(compiler::Node* function_id, compiler::Node* context, - compiler::Node* first_arg, - compiler::Node* arg_count, int return_size = 1); + const RegListNodePair& args, + int return_size = 1); // Jump forward relative to the current bytecode by the |jump_offset|. compiler::Node* Jump(compiler::Node* jump_offset); @@ -217,14 +235,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Dispatch to the bytecode. compiler::Node* Dispatch(); - // Dispatch to bytecode handler. - compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) { - return DispatchToBytecodeHandler(handler, BytecodeOffset()); - } - // Dispatch bytecode as wide operand variant. 
void DispatchWide(OperandScale operand_scale); + // Dispatch to |target_bytecode| at |new_bytecode_offset|. + // |target_bytecode| should be equivalent to loading from the offset. + compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode, + compiler::Node* new_bytecode_offset); + // Abort with the given abort reason. void Abort(AbortReason abort_reason); void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs, @@ -264,6 +282,18 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // interpreted. compiler::Node* GetInterpretedFramePointer(); + // Operations on registers. + compiler::Node* RegisterLocation(Register reg); + compiler::Node* RegisterLocation(compiler::Node* reg_index); + compiler::Node* NextRegister(compiler::Node* reg_index); + compiler::Node* LoadRegister(Node* reg_index); + void StoreRegister(compiler::Node* value, compiler::Node* reg_index); + + // Poison |value| on speculative paths. + compiler::Node* PoisonOnSpeculationTagged(Node* value); + compiler::Node* PoisonOnSpeculationWord(Node* value); + compiler::Node* PoisonOnSpeculationInt32(Node* value); + // Saves and restores interpreter bytecode offset to the interpreter stack // frame when performing a call. void CallPrologue(); @@ -291,16 +321,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // The |result_type| determines the size and signedness. of the // value read. This method should only be used on architectures that // do not support unaligned memory accesses. - compiler::Node* BytecodeOperandReadUnaligned(int relative_offset, - MachineType result_type); - - // Returns zero- or sign-extended to word32 value of the operand. 
- compiler::Node* BytecodeOperandUnsignedByte(int operand_index); - compiler::Node* BytecodeOperandSignedByte(int operand_index); - compiler::Node* BytecodeOperandUnsignedShort(int operand_index); - compiler::Node* BytecodeOperandSignedShort(int operand_index); - compiler::Node* BytecodeOperandUnsignedQuad(int operand_index); - compiler::Node* BytecodeOperandSignedQuad(int operand_index); + compiler::Node* BytecodeOperandReadUnalignedUnpoisoned( + int relative_offset, MachineType result_type); + + // Returns zero- or sign-extended to word32 value of the operand. Values are + // not poisoned on speculation - should be used with care. + compiler::Node* BytecodeOperandUnsignedByteUnpoisoned(int operand_index); + compiler::Node* BytecodeOperandSignedByteUnpoisoned(int operand_index); + compiler::Node* BytecodeOperandUnsignedShortUnpoisoned(int operand_index); + compiler::Node* BytecodeOperandSignedShortUnpoisoned(int operand_index); + compiler::Node* BytecodeOperandUnsignedQuadUnpoisoned(int operand_index); + compiler::Node* BytecodeOperandSignedQuadUnpoisoned(int operand_index); + compiler::Node* BytecodeSignedOperandUnpoisoned(int operand_index, + OperandSize operand_size); + compiler::Node* BytecodeUnsignedOperandUnpoisoned(int operand_index, + OperandSize operand_size); // Returns zero- or sign-extended to word32 value of the operand of // given size. @@ -309,6 +344,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { compiler::Node* BytecodeUnsignedOperand(int operand_index, OperandSize operand_size); + // Returns the word-size sign-extended register index for bytecode operand + // |operand_index| in the current bytecode. Value is not poisoned on + // speculation since the value loaded from the register is poisoned instead. + compiler::Node* BytecodeOperandRegUnpoisoned(int operand_index); + + // Returns the word zero-extended index immediate for bytecode operand + // |operand_index| in the current bytecode for use when loading a . 
+ compiler::Node* BytecodeOperandConstantPoolIdxUnpoisoned(int operand_index); + // Jump relative to the current bytecode by the |jump_offset|. If |backward|, // then jump backward (subtract the offset), otherwise jump forward (add the // offset). Helper function for Jump and JumpBackward. @@ -344,18 +388,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // next dispatch offset. void InlineStar(); - // Dispatch to |target_bytecode| at |new_bytecode_offset|. - // |target_bytecode| should be equivalent to loading from the offset. - compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode, - compiler::Node* new_bytecode_offset); - // Dispatch to the bytecode handler with code offset |handler|. compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler, - compiler::Node* bytecode_offset); + compiler::Node* bytecode_offset, + compiler::Node* target_bytecode); // Dispatch to the bytecode handler with code entry point |handler_entry|. compiler::Node* DispatchToBytecodeHandlerEntry( - compiler::Node* handler_entry, compiler::Node* bytecode_offset); + compiler::Node* handler_entry, compiler::Node* bytecode_offset, + compiler::Node* target_bytecode); int CurrentBytecodeSize() const; @@ -373,6 +414,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { bool reloaded_frame_ptr_; bool bytecode_array_valid_; + Node* speculation_poison_; + bool disable_stack_check_across_call_; compiler::Node* stack_pointer_before_call_; diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc index 5dabc13ea0..65af249ea7 100644 --- a/deps/v8/src/interpreter/interpreter-generator.cc +++ b/deps/v8/src/interpreter/interpreter-generator.cc @@ -73,8 +73,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) { // // Load constant literal at |idx| in the constant pool into the accumulator. 
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) { - Node* index = BytecodeOperandIdx(0); - Node* constant = LoadConstantPoolEntry(index); + Node* constant = LoadConstantPoolEntryAtOperandIndex(0); SetAccumulator(constant); Dispatch(); } @@ -123,8 +122,7 @@ IGNITION_HANDLER(LdaFalse, InterpreterAssembler) { // // Load accumulator with value from register <src>. IGNITION_HANDLER(Ldar, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* value = LoadRegister(reg_index); + Node* value = LoadRegisterAtOperandIndex(0); SetAccumulator(value); Dispatch(); } @@ -133,9 +131,8 @@ IGNITION_HANDLER(Ldar, InterpreterAssembler) { // // Store accumulator to register <dst>. IGNITION_HANDLER(Star, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); Node* accumulator = GetAccumulator(); - StoreRegister(accumulator, reg_index); + StoreRegisterAtOperandIndex(accumulator, 0); Dispatch(); } @@ -143,10 +140,8 @@ IGNITION_HANDLER(Star, InterpreterAssembler) { // // Stores the value of register <src> to register <dst>. IGNITION_HANDLER(Mov, InterpreterAssembler) { - Node* src_index = BytecodeOperandReg(0); - Node* src_value = LoadRegister(src_index); - Node* dst_index = BytecodeOperandReg(1); - StoreRegister(src_value, dst_index); + Node* src_value = LoadRegisterAtOperandIndex(0); + StoreRegisterAtOperandIndex(src_value, 1); Dispatch(); } @@ -169,8 +164,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler { LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); }; LazyNode<Name> lazy_name = [=] { - Node* name_index = BytecodeOperandIdx(name_operand_index); - Node* name = LoadConstantPoolEntry(name_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index); return CAST(name); }; @@ -214,8 +208,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { Node* context = GetContext(); // Store the global via the StoreGlobalIC. 
- Node* constant_index = BytecodeOperandIdx(0); - Node* name = LoadConstantPoolEntry(constant_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* value = GetAccumulator(); Node* raw_slot = BytecodeOperandIdx(1); Node* smi_slot = SmiTag(raw_slot); @@ -230,8 +223,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { // Load the object in |slot_index| of the context at |depth| in the context // chain starting at |context| into the accumulator. IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* context = LoadRegister(reg_index); + Node* context = LoadRegisterAtOperandIndex(0); Node* slot_index = BytecodeOperandIdx(1); Node* depth = BytecodeOperandUImm(2); Node* slot_context = GetContextAtDepth(context, depth); @@ -245,8 +237,7 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) { // Load the object in |slot_index| of the context at |depth| in the context // chain starting at |context| into the accumulator. IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* context = LoadRegister(reg_index); + Node* context = LoadRegisterAtOperandIndex(0); Node* slot_index = BytecodeOperandIdx(1); Node* depth = BytecodeOperandUImm(2); Node* slot_context = GetContextAtDepth(context, depth); @@ -283,8 +274,7 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) { // |depth| in the context chain starting at |context|. 
IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) { Node* value = GetAccumulator(); - Node* reg_index = BytecodeOperandReg(0); - Node* context = LoadRegister(reg_index); + Node* context = LoadRegisterAtOperandIndex(0); Node* slot_index = BytecodeOperandIdx(1); Node* depth = BytecodeOperandUImm(2); Node* slot_context = GetContextAtDepth(context, depth); @@ -309,8 +299,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) { // Lookup the object with the name in constant pool entry |name_index| // dynamically. IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) { - Node* name_index = BytecodeOperandIdx(0); - Node* name = LoadConstantPoolEntry(name_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* context = GetContext(); Node* result = CallRuntime(Runtime::kLoadLookupSlot, context, name); SetAccumulator(result); @@ -322,8 +311,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) { // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) { - Node* name_index = BytecodeOperandIdx(0); - Node* name = LoadConstantPoolEntry(name_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* context = GetContext(); Node* result = CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name); @@ -340,7 +328,6 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { void LookupContextSlot(Runtime::FunctionId function_id) { Node* context = GetContext(); - Node* name_index = BytecodeOperandIdx(0); Node* slot_index = BytecodeOperandIdx(1); Node* depth = BytecodeOperandUImm(2); @@ -360,7 +347,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { // Slow path when we have to call out to the runtime. 
BIND(&slowpath); { - Node* name = LoadConstantPoolEntry(name_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* result = CallRuntime(function_id, context, name); SetAccumulator(result); Dispatch(); @@ -416,8 +403,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler { // Slow path when we have to call out to the runtime BIND(&slowpath); { - Node* name_index = BytecodeOperandIdx(0); - Node* name = LoadConstantPoolEntry(name_index); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* result = CallRuntime(function_id, context, name); SetAccumulator(result); Dispatch(); @@ -448,9 +434,8 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof, // pool entry |name_index|. IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) { Node* value = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); Node* bytecode_flags = BytecodeOperandFlag(1); - Node* name = LoadConstantPoolEntry(index); Node* context = GetContext(); Variable var_result(this, MachineRepresentation::kTagged); @@ -510,14 +495,11 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { Node* smi_slot = SmiTag(feedback_slot); // Load receiver. - Node* register_index = BytecodeOperandReg(0); - Node* recv = LoadRegister(register_index); + Node* recv = LoadRegisterAtOperandIndex(0); // Load the name. // TODO(jgruber): Not needed for monomorphic smi handler constant/field case. 
- Node* constant_index = BytecodeOperandIdx(1); - Node* name = LoadConstantPoolEntry(constant_index); - + Node* name = LoadConstantPoolEntryAtOperandIndex(1); Node* context = GetContext(); Label done(this); @@ -543,8 +525,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) { Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC); Node* code_target = HeapConstant(ic.code()); - Node* reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(reg_index); + Node* object = LoadRegisterAtOperandIndex(0); Node* name = GetAccumulator(); Node* raw_slot = BytecodeOperandIdx(1); Node* smi_slot = SmiTag(raw_slot); @@ -565,10 +546,8 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler { void StaNamedProperty(Callable ic) { Node* code_target = HeapConstant(ic.code()); - Node* object_reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(object_reg_index); - Node* constant_index = BytecodeOperandIdx(1); - Node* name = LoadConstantPoolEntry(constant_index); + Node* object = LoadRegisterAtOperandIndex(0); + Node* name = LoadConstantPoolEntryAtOperandIndex(1); Node* value = GetAccumulator(); Node* raw_slot = BytecodeOperandIdx(2); Node* smi_slot = SmiTag(raw_slot); @@ -611,10 +590,8 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) { IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) { Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC); Node* code_target = HeapConstant(ic.code()); - Node* object_reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(object_reg_index); - Node* name_reg_index = BytecodeOperandReg(1); - Node* name = LoadRegister(name_reg_index); + Node* object = LoadRegisterAtOperandIndex(0); + Node* name = LoadRegisterAtOperandIndex(1); Node* value = GetAccumulator(); Node* raw_slot = BytecodeOperandIdx(2); Node* smi_slot = SmiTag(raw_slot); @@ -638,10 +615,10 @@ 
IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) { // This definition is not observable and is used only for definitions // in object or class literals. IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) { - Node* object = LoadRegister(BytecodeOperandReg(0)); - Node* name = LoadRegister(BytecodeOperandReg(1)); + Node* object = LoadRegisterAtOperandIndex(0); + Node* name = LoadRegisterAtOperandIndex(1); Node* value = GetAccumulator(); - Node* flags = SmiFromWord32(BytecodeOperandFlag(2)); + Node* flags = SmiFromInt32(BytecodeOperandFlag(2)); Node* vector_index = SmiTag(BytecodeOperandIdx(3)); Node* feedback_vector = LoadFeedbackVector(); @@ -749,10 +726,9 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) { // Saves the current context in <context>, and pushes the accumulator as the // new current context. IGNITION_HANDLER(PushContext, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); Node* new_context = GetAccumulator(); Node* old_context = GetContext(); - StoreRegister(old_context, reg_index); + StoreRegisterAtOperandIndex(old_context, 0); SetContext(new_context); Dispatch(); } @@ -761,8 +737,7 @@ IGNITION_HANDLER(PushContext, InterpreterAssembler) { // // Pops the current context and sets <context> as the new context. 
IGNITION_HANDLER(PopContext, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* context = LoadRegister(reg_index); + Node* context = LoadRegisterAtOperandIndex(0); SetContext(context); Dispatch(); } @@ -780,8 +755,7 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler { bool lhs_is_smi); void BinaryOpWithFeedback(BinaryOpGenerator generator) { - Node* reg_index = BytecodeOperandReg(0); - Node* lhs = LoadRegister(reg_index); + Node* lhs = LoadRegisterAtOperandIndex(0); Node* rhs = GetAccumulator(); Node* context = GetContext(); Node* slot_index = BytecodeOperandIdx(1); @@ -902,8 +876,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler { : InterpreterAssembler(state, bytecode, operand_scale) {} void BitwiseBinaryOpWithFeedback(Operation bitwise_op) { - Node* reg_index = BytecodeOperandReg(0); - Node* left = LoadRegister(reg_index); + Node* left = LoadRegisterAtOperandIndex(0); Node* right = GetAccumulator(); Node* context = GetContext(); Node* slot_index = BytecodeOperandIdx(1); @@ -969,7 +942,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler { &var_left_bigint, &var_left_feedback); BIND(&do_smi_op); Node* result = - BitwiseOp(var_left_word32.value(), SmiToWord32(right), bitwise_op); + BitwiseOp(var_left_word32.value(), SmiToInt32(right), bitwise_op); Node* result_type = SelectSmiConstant(TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); @@ -1279,7 +1252,7 @@ IGNITION_HANDLER(ToName, InterpreterAssembler) { Node* object = GetAccumulator(); Node* context = GetContext(); Node* result = ToName(context, object); - StoreRegister(result, BytecodeOperandReg(0)); + StoreRegisterAtOperandIndex(result, 0); Dispatch(); } @@ -1306,7 +1279,7 @@ IGNITION_HANDLER(ToObject, InterpreterAssembler) { Node* accumulator = GetAccumulator(); Node* context = GetContext(); Node* result = CallStub(callable.descriptor(), target, context, accumulator); - 
StoreRegister(result, BytecodeOperandReg(0)); + StoreRegisterAtOperandIndex(result, 0); Dispatch(); } @@ -1449,8 +1422,7 @@ IGNITION_HANDLER(TypeOf, InterpreterAssembler) { // Delete the property specified in the accumulator from the object // referenced by the register operand following strict mode semantics. IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(reg_index); + Node* object = LoadRegisterAtOperandIndex(0); Node* key = GetAccumulator(); Node* context = GetContext(); Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key, @@ -1464,8 +1436,7 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) { // Delete the property specified in the accumulator from the object // referenced by the register operand following sloppy mode semantics. IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(reg_index); + Node* object = LoadRegisterAtOperandIndex(0); Node* key = GetAccumulator(); Node* context = GetContext(); Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key, @@ -1482,8 +1453,7 @@ IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) { Node* active_function = GetAccumulator(); Node* context = GetContext(); Node* result = GetSuperConstructor(active_function, context); - Node* reg = BytecodeOperandReg(0); - StoreRegister(result, reg); + StoreRegisterAtOperandIndex(result, 0); Dispatch(); } @@ -1495,20 +1465,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { // Generates code to perform a JS call that collects type feedback. 
void JSCall(ConvertReceiverMode receiver_mode) { - Node* function_reg = BytecodeOperandReg(0); - Node* function = LoadRegister(function_reg); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(first_arg_reg); - Node* arg_list_count = BytecodeOperandCount(2); - Node* args_count; - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // The receiver is implied, so it is not in the argument list. - args_count = arg_list_count; - } else { - // Subtract the receiver from the argument count. - Node* receiver_count = Int32Constant(1); - args_count = Int32Sub(arg_list_count, receiver_count); - } + Node* function = LoadRegisterAtOperandIndex(0); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* slot_id = BytecodeOperandIdx(3); Node* feedback_vector = LoadFeedbackVector(); Node* context = GetContext(); @@ -1517,7 +1475,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { CollectCallFeedback(function, context, feedback_vector, slot_id); // Call the function and dispatch to the next handler. 
- CallJSAndDispatch(function, context, first_arg, args_count, receiver_mode); + CallJSAndDispatch(function, context, args, receiver_mode); } // Generates code to perform a JS call with a known number of arguments that @@ -1531,8 +1489,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { const int kSlotOperandIndex = kFirstArgumentOperandIndex + kRecieverAndArgOperandCount; - Node* function_reg = BytecodeOperandReg(0); - Node* function = LoadRegister(function_reg); + Node* function = LoadRegisterAtOperandIndex(0); Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex); Node* feedback_vector = LoadFeedbackVector(); Node* context = GetContext(); @@ -1548,20 +1505,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { case 1: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)); break; case 2: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)), - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)); break; case 3: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)), - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)), - LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 2))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2)); break; default: UNREACHABLE(); @@ -1617,11 +1574,9 @@ IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) { // registers. 
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) { Node* function_id = BytecodeOperandRuntimeId(0); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(first_arg_reg); - Node* args_count = BytecodeOperandCount(2); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* context = GetContext(); - Node* result = CallRuntimeN(function_id, context, first_arg, args_count); + Node* result = CallRuntimeN(function_id, context, args); SetAccumulator(result); Dispatch(); } @@ -1633,11 +1588,9 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) { // arguments in subsequent registers. IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) { Node* function_id = BytecodeOperandIntrinsicId(0); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* arg_count = BytecodeOperandCount(2); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* context = GetContext(); - Node* result = GenerateInvokeIntrinsic(this, function_id, context, - first_arg_reg, arg_count); + Node* result = GenerateInvokeIntrinsic(this, function_id, context, args); SetAccumulator(result); Dispatch(); } @@ -1651,19 +1604,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) { IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) { // Call the runtime function. 
Node* function_id = BytecodeOperandRuntimeId(0); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(first_arg_reg); - Node* args_count = BytecodeOperandCount(2); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* context = GetContext(); - Node* result_pair = - CallRuntimeN(function_id, context, first_arg, args_count, 2); + Node* result_pair = CallRuntimeN(function_id, context, args, 2); // Store the results in <first_return> and <first_return + 1> - Node* first_return_reg = BytecodeOperandReg(3); - Node* second_return_reg = NextRegister(first_return_reg); Node* result0 = Projection(0, result_pair); Node* result1 = Projection(1, result_pair); - StoreRegister(result0, first_return_reg); - StoreRegister(result1, second_return_reg); + StoreRegisterPairAtOperandIndex(result0, result1, 3); Dispatch(); } @@ -1673,9 +1620,7 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) { // in register |receiver| and |arg_count| arguments in subsequent registers. IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { Node* context_index = BytecodeOperandNativeContextIndex(0); - Node* receiver_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(receiver_reg); - Node* args_count = BytecodeOperandCount(2); + RegListNodePair args = GetRegisterListAtOperandIndex(1); // Get the function to call from the native context. Node* context = GetContext(); @@ -1683,7 +1628,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { Node* function = LoadContextElement(native_context, context_index); // Call the function. - CallJSAndDispatch(function, context, first_arg, args_count, + CallJSAndDispatch(function, context, args, ConvertReceiverMode::kNullOrUndefined); } @@ -1694,20 +1639,15 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { // final argument is always a spread. 
// IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) { - Node* callable_reg = BytecodeOperandReg(0); - Node* callable = LoadRegister(callable_reg); - Node* receiver_reg = BytecodeOperandReg(1); - Node* receiver_arg = RegisterLocation(receiver_reg); - Node* receiver_args_count = BytecodeOperandCount(2); - Node* receiver_count = Int32Constant(1); - Node* args_count = Int32Sub(receiver_args_count, receiver_count); + Node* callable = LoadRegisterAtOperandIndex(0); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* slot_id = BytecodeOperandIdx(3); Node* feedback_vector = LoadFeedbackVector(); Node* context = GetContext(); // Call into Runtime function CallWithSpread which does everything. - CallJSWithSpreadAndDispatch(callable, context, receiver_arg, args_count, - slot_id, feedback_vector); + CallJSWithSpreadAndDispatch(callable, context, args, slot_id, + feedback_vector); } // ConstructWithSpread <first_arg> <arg_count> @@ -1718,17 +1658,13 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) { // IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) { Node* new_target = GetAccumulator(); - Node* constructor_reg = BytecodeOperandReg(0); - Node* constructor = LoadRegister(constructor_reg); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(first_arg_reg); - Node* args_count = BytecodeOperandCount(2); + Node* constructor = LoadRegisterAtOperandIndex(0); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* slot_id = BytecodeOperandIdx(3); Node* feedback_vector = LoadFeedbackVector(); Node* context = GetContext(); - Node* result = - ConstructWithSpread(constructor, context, new_target, first_arg, - args_count, slot_id, feedback_vector); + Node* result = ConstructWithSpread(constructor, context, new_target, args, + slot_id, feedback_vector); SetAccumulator(result); Dispatch(); } @@ -1741,16 +1677,13 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) { // IGNITION_HANDLER(Construct, 
InterpreterAssembler) { Node* new_target = GetAccumulator(); - Node* constructor_reg = BytecodeOperandReg(0); - Node* constructor = LoadRegister(constructor_reg); - Node* first_arg_reg = BytecodeOperandReg(1); - Node* first_arg = RegisterLocation(first_arg_reg); - Node* args_count = BytecodeOperandCount(2); + Node* constructor = LoadRegisterAtOperandIndex(0); + RegListNodePair args = GetRegisterListAtOperandIndex(1); Node* slot_id = BytecodeOperandIdx(3); Node* feedback_vector = LoadFeedbackVector(); Node* context = GetContext(); - Node* result = Construct(constructor, context, new_target, first_arg, - args_count, slot_id, feedback_vector); + Node* result = Construct(constructor, context, new_target, args, slot_id, + feedback_vector); SetAccumulator(result); Dispatch(); } @@ -1762,8 +1695,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler { : InterpreterAssembler(state, bytecode, operand_scale) {} void CompareOpWithFeedback(Operation compare_op) { - Node* reg_index = BytecodeOperandReg(0); - Node* lhs = LoadRegister(reg_index); + Node* lhs = LoadRegisterAtOperandIndex(0); Node* rhs = GetAccumulator(); Node* context = GetContext(); @@ -1844,8 +1776,7 @@ IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) { // Test if the value in the <src> register is strictly equal to the accumulator. // Type feedback is not collected. IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* lhs = LoadRegister(reg_index); + Node* lhs = LoadRegisterAtOperandIndex(0); Node* rhs = GetAccumulator(); // TODO(5310): This is called only when lhs and rhs are Smis (for ex: // try-finally or generators) or strings (only when visiting @@ -1861,8 +1792,7 @@ IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) { // Test if the object referenced by the register operand is a property of the // object referenced by the accumulator. 
IGNITION_HANDLER(TestIn, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* property = LoadRegister(reg_index); + Node* property = LoadRegisterAtOperandIndex(0); Node* object = GetAccumulator(); Node* context = GetContext(); @@ -1875,8 +1805,7 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) { // Test if the object referenced by the <src> register is an an instance of type // referenced by the accumulator. IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) { - Node* object_reg = BytecodeOperandReg(0); - Node* object = LoadRegister(object_reg); + Node* object = LoadRegisterAtOperandIndex(0); Node* callable = GetAccumulator(); Node* slot_id = BytecodeOperandIdx(1); Node* feedback_vector = LoadFeedbackVector(); @@ -2063,8 +1992,7 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) { // Jump by the number of bytes in the Smi in the |idx| entry in the constant // pool. IGNITION_HANDLER(JumpConstant, InterpreterAssembler) { - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); Jump(relative_jump); } @@ -2088,8 +2016,7 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) { // and will misbehave if passed arbitrary input values. IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); CSA_ASSERT(this, IsBoolean(accumulator)); JumpIfWordEqual(accumulator, TrueConstant(), relative_jump); @@ -2115,8 +2042,7 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) { // and will misbehave if passed arbitrary input values. 
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); CSA_ASSERT(this, IsBoolean(accumulator)); JumpIfWordEqual(accumulator, FalseConstant(), relative_jump); @@ -2144,8 +2070,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) { // cast to boolean. IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) { Node* value = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); Label if_true(this), if_false(this); BranchIfToBooleanIsTrue(value, &if_true, &if_false); BIND(&if_true); @@ -2176,8 +2101,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) { // cast to boolean. IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) { Node* value = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); Label if_true(this), if_false(this); BranchIfToBooleanIsTrue(value, &if_true, &if_false); BIND(&if_true); @@ -2202,8 +2126,7 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) { // pool if the object referenced by the accumulator is the null constant. 
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); JumpIfWordEqual(accumulator, NullConstant(), relative_jump); } @@ -2223,8 +2146,7 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) { // pool if the object referenced by the accumulator is not the null constant. IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump); } @@ -2244,8 +2166,7 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) { // pool if the object referenced by the accumulator is the undefined constant. IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump); } @@ -2266,8 +2187,7 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) { // constant. IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump); } @@ -2297,8 +2217,7 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) { // pool if the object referenced by the accumulator is a JSReceiver. 
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) { Node* accumulator = GetAccumulator(); - Node* index = BytecodeOperandIdx(0); - Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0); Label if_object(this), if_notobject(this), if_notsmi(this); Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi); @@ -2378,11 +2297,10 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) { // Creates a regular expression literal for literal index <literal_idx> with // <flags> and the pattern in <pattern_idx>. IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) { - Node* pattern_index = BytecodeOperandIdx(0); - Node* pattern = LoadConstantPoolEntry(pattern_index); + Node* pattern = LoadConstantPoolEntryAtOperandIndex(0); Node* feedback_vector = LoadFeedbackVector(); Node* slot_id = BytecodeOperandIdx(1); - Node* flags = SmiFromWord32(BytecodeOperandFlag(2)); + Node* flags = SmiFromInt32(BytecodeOperandFlag(2)); Node* context = GetContext(); ConstructorBuiltinsAssembler constructor_assembler(state()); Node* result = constructor_assembler.EmitCreateRegExpLiteral( @@ -2421,8 +2339,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) { Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>( bytecode_flags); Node* flags = SmiTag(flags_raw); - Node* index = BytecodeOperandIdx(0); - Node* constant_elements = LoadConstantPoolEntry(index); + Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0); Node* result = CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector, SmiTag(slot_id), constant_elements, flags); @@ -2466,15 +2383,14 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { ConstructorBuiltinsAssembler constructor_assembler(state()); Node* result = constructor_assembler.EmitCreateShallowObjectLiteral( feedback_vector, slot_id, &if_not_fast_clone); - StoreRegister(result, BytecodeOperandReg(3)); + 
StoreRegisterAtOperandIndex(result, 3); Dispatch(); } BIND(&if_not_fast_clone); { // If we can't do a fast clone, call into the runtime. - Node* index = BytecodeOperandIdx(0); - Node* boilerplate_description = LoadConstantPoolEntry(index); + Node* boilerplate_description = LoadConstantPoolEntryAtOperandIndex(0); Node* context = GetContext(); Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>( @@ -2484,7 +2400,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { Node* result = CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, SmiTag(slot_id), boilerplate_description, flags); - StoreRegister(result, BytecodeOperandReg(3)); + StoreRegisterAtOperandIndex(result, 3); // TODO(klaasb) build a single dispatch once the call is inlined Dispatch(); } @@ -2501,19 +2417,34 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) { Dispatch(); } -// GetTemplateObject +// GetTemplateObject <descriptor_idx> <literal_idx> // // Creates the template to pass for tagged templates and returns it in the // accumulator, creating and caching the site object on-demand as per the // specification. 
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { - Node* description_index = BytecodeOperandIdx(0); - Node* description = LoadConstantPoolEntry(description_index); - Node* context = GetContext(); + Node* feedback_vector = LoadFeedbackVector(); + Node* slot = BytecodeOperandIdx(1); + Node* cached_value = + LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS); - Node* result = CallRuntime(Runtime::kGetTemplateObject, context, description); - SetAccumulator(result); + Label call_runtime(this, Label::kDeferred); + GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime); + + SetAccumulator(cached_value); Dispatch(); + + BIND(&call_runtime); + { + Node* description = LoadConstantPoolEntryAtOperandIndex(0); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kCreateTemplateObject, context, description); + StoreFeedbackVectorSlot(feedback_vector, slot, result, UPDATE_WRITE_BARRIER, + 0, INTPTR_PARAMETERS); + SetAccumulator(result); + Dispatch(); + } } // CreateClosure <index> <slot> <tenured> @@ -2521,35 +2452,47 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { // Creates a new closure for SharedFunctionInfo at position |index| in the // constant pool and with the PretenureFlag <tenured>. 
IGNITION_HANDLER(CreateClosure, InterpreterAssembler) { - Node* index = BytecodeOperandIdx(0); - Node* shared = LoadConstantPoolEntry(index); + Node* shared = LoadConstantPoolEntryAtOperandIndex(0); Node* flags = BytecodeOperandFlag(2); Node* context = GetContext(); - - Label call_runtime(this, Label::kDeferred); - GotoIfNot(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), - &call_runtime); - ConstructorBuiltinsAssembler constructor_assembler(state()); - Node* vector_index = BytecodeOperandIdx(1); - vector_index = SmiTag(vector_index); + Node* slot = BytecodeOperandIdx(1); Node* feedback_vector = LoadFeedbackVector(); - SetAccumulator(constructor_assembler.EmitFastNewClosure( - shared, feedback_vector, vector_index, context)); - Dispatch(); + Node* feedback_cell = LoadFeedbackVectorSlot(feedback_vector, slot); - BIND(&call_runtime); + Label if_fast(this), if_slow(this, Label::kDeferred); + Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast, + &if_slow); + + BIND(&if_fast); { - Node* tenured_raw = - DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags); - Node* tenured = SmiTag(tenured_raw); - feedback_vector = LoadFeedbackVector(); - vector_index = BytecodeOperandIdx(1); - vector_index = SmiTag(vector_index); - Node* result = CallRuntime(Runtime::kInterpreterNewClosure, context, shared, - feedback_vector, vector_index, tenured); + Node* result = + CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell); SetAccumulator(result); Dispatch(); } + + BIND(&if_slow); + { + Label if_newspace(this), if_oldspace(this); + Branch(IsSetWord32<CreateClosureFlags::PretenuredBit>(flags), &if_oldspace, + &if_newspace); + + BIND(&if_newspace); + { + Node* result = + CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell); + SetAccumulator(result); + Dispatch(); + } + + BIND(&if_oldspace); + { + Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared, + feedback_cell); + SetAccumulator(result); + 
Dispatch(); + } + } } // CreateBlockContext <index> @@ -2557,8 +2500,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) { // Creates a new block context with the scope info constant at |index| and the // closure in the accumulator. IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) { - Node* index = BytecodeOperandIdx(0); - Node* scope_info = LoadConstantPoolEntry(index); + Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0); Node* closure = GetAccumulator(); Node* context = GetContext(); SetAccumulator( @@ -2572,12 +2514,9 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) { // the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the // closure in the accumulator. IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) { - Node* exception_reg = BytecodeOperandReg(0); - Node* exception = LoadRegister(exception_reg); - Node* name_idx = BytecodeOperandIdx(1); - Node* name = LoadConstantPoolEntry(name_idx); - Node* scope_info_idx = BytecodeOperandIdx(2); - Node* scope_info = LoadConstantPoolEntry(scope_info_idx); + Node* exception = LoadRegisterAtOperandIndex(0); + Node* name = LoadConstantPoolEntryAtOperandIndex(1); + Node* scope_info = LoadConstantPoolEntryAtOperandIndex(2); Node* closure = GetAccumulator(); Node* context = GetContext(); SetAccumulator(CallRuntime(Runtime::kPushCatchContext, context, name, @@ -2617,10 +2556,8 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) { // with-statement with the object in |register| and the closure in the // accumulator. 
IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) { - Node* reg_index = BytecodeOperandReg(0); - Node* object = LoadRegister(reg_index); - Node* scope_info_idx = BytecodeOperandIdx(1); - Node* scope_info = LoadConstantPoolEntry(scope_info_idx); + Node* object = LoadRegisterAtOperandIndex(0); + Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1); Node* closure = GetAccumulator(); Node* context = GetContext(); SetAccumulator(CallRuntime(Runtime::kPushWithContext, context, object, @@ -2771,7 +2708,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) { BIND(&throw_error); { - Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0)); + Node* name = LoadConstantPoolEntryAtOperandIndex(0); CallRuntime(Runtime::kThrowReferenceError, GetContext(), name); // We shouldn't ever return from a throw. Abort(AbortReason::kUnexpectedReturnFromThrow); @@ -2834,10 +2771,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) { Node* result_pair = \ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \ Node* return_value = Projection(0, result_pair); \ - Node* original_handler = Projection(1, result_pair); \ + Node* original_bytecode = SmiUntag(Projection(1, result_pair)); \ MaybeDropFrames(context); \ SetAccumulator(return_value); \ - DispatchToBytecodeHandler(original_handler); \ + DispatchToBytecode(original_bytecode, BytecodeOffset()); \ } DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); #undef DEBUG_BREAK @@ -2856,30 +2793,13 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) { Dispatch(); } -class InterpreterForInPrepareAssembler : public InterpreterAssembler { - public: - InterpreterForInPrepareAssembler(CodeAssemblerState* state, Bytecode bytecode, - OperandScale operand_scale) - : InterpreterAssembler(state, bytecode, operand_scale) {} - - void BuildForInPrepareResult(Node* output_register, Node* cache_type, - Node* cache_array, Node* cache_length) { - StoreRegister(cache_type, output_register); - output_register = 
NextRegister(output_register); - StoreRegister(cache_array, output_register); - output_register = NextRegister(output_register); - StoreRegister(cache_length, output_register); - } -}; - // ForInEnumerate <receiver> // // Enumerates the enumerable keys of the |receiver| and either returns the // map of the |receiver| if it has a usable enum cache or a fixed array // with the keys to enumerate in the accumulator. IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) { - Node* receiver_register = BytecodeOperandReg(0); - Node* receiver = LoadRegister(receiver_register); + Node* receiver = LoadRegisterAtOperandIndex(0); Node* context = GetContext(); Label if_empty(this), if_runtime(this, Label::kDeferred); @@ -2910,9 +2830,8 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) { // The result is output in registers |cache_info_triple| to // |cache_info_triple + 2|, with the registers holding cache_type, cache_array, // and cache_length respectively. -IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) { +IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) { Node* enumerator = GetAccumulator(); - Node* output_register = BytecodeOperandReg(0); Node* vector_index = BytecodeOperandIdx(1); Node* feedback_vector = LoadFeedbackVector(); @@ -2946,8 +2865,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) { Node* cache_type = enumerator; Node* cache_array = enum_keys; Node* cache_length = SmiTag(enum_length); - BuildForInPrepareResult(output_register, cache_type, cache_array, - cache_length); + StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0); Dispatch(); } @@ -2964,8 +2882,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) { Node* cache_type = enumerator; Node* cache_array = enumerator; Node* cache_length = LoadFixedArrayBaseLength(enumerator); - BuildForInPrepareResult(output_register, cache_type, cache_array, - cache_length); + StoreRegisterTripleAtOperandIndex(cache_type, cache_array, 
cache_length, 0); Dispatch(); } } @@ -2974,14 +2891,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) { // // Returns the next enumerable property in the the accumulator. IGNITION_HANDLER(ForInNext, InterpreterAssembler) { - Node* receiver_reg = BytecodeOperandReg(0); - Node* receiver = LoadRegister(receiver_reg); - Node* index_reg = BytecodeOperandReg(1); - Node* index = LoadRegister(index_reg); - Node* cache_type_reg = BytecodeOperandReg(2); - Node* cache_type = LoadRegister(cache_type_reg); - Node* cache_array_reg = NextRegister(cache_type_reg); - Node* cache_array = LoadRegister(cache_array_reg); + Node* receiver = LoadRegisterAtOperandIndex(0); + Node* index = LoadRegisterAtOperandIndex(1); + Node* cache_type; + Node* cache_array; + std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2); Node* vector_index = BytecodeOperandIdx(3); Node* feedback_vector = LoadFeedbackVector(); @@ -3017,10 +2931,8 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) { // // Returns false if the end of the enumerable properties has been reached. IGNITION_HANDLER(ForInContinue, InterpreterAssembler) { - Node* index_reg = BytecodeOperandReg(0); - Node* index = LoadRegister(index_reg); - Node* cache_length_reg = BytecodeOperandReg(1); - Node* cache_length = LoadRegister(cache_length_reg); + Node* index = LoadRegisterAtOperandIndex(0); + Node* cache_length = LoadRegisterAtOperandIndex(1); // Check if {index} is at {cache_length} already. Label if_true(this), if_false(this), end(this); @@ -3044,8 +2956,7 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) { // Increments the loop counter in register |index| and stores the result // in the accumulator. 
IGNITION_HANDLER(ForInStep, InterpreterAssembler) { - Node* index_reg = BytecodeOperandReg(0); - Node* index = LoadRegister(index_reg); + Node* index = LoadRegisterAtOperandIndex(0); Node* one = SmiConstant(1); Node* result = SmiAdd(index, one); SetAccumulator(result); @@ -3078,34 +2989,16 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) { // // Exports the register file and stores it into the generator. Also stores the // current context, |suspend_id|, and the current bytecode offset (for debugging -// purposes) into the generator. +// purposes) into the generator. Then, returns the value in the accumulator. IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) { - Node* generator_reg = BytecodeOperandReg(0); - - Node* generator = LoadRegister(generator_reg); - - Label if_stepping(this, Label::kDeferred), ok(this); - Node* step_action_address = ExternalConstant( - ExternalReference::debug_last_step_action_address(isolate())); - Node* step_action = Load(MachineType::Int8(), step_action_address); - STATIC_ASSERT(StepIn > StepNext); - STATIC_ASSERT(LastStepAction == StepIn); - Node* step_next = Int32Constant(StepNext); - Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok); - BIND(&ok); - + Node* generator = LoadRegisterAtOperandIndex(0); Node* array = LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset); Node* context = GetContext(); + RegListNodePair registers = GetRegisterListAtOperandIndex(1); Node* suspend_id = BytecodeOperandUImmSmi(3); - // Bytecode operand 1 should be always 0 (we are always store registers - // from the beginning). - CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1), - IntPtrConstant(Register(0).ToOperand()))); - // Bytecode operand 2 is the number of registers to store to the generator. 
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2)); - ExportRegisterFile(array, register_count); + ExportRegisterFile(array, registers); StoreObjectField(generator, JSGeneratorObject::kContextOffset, context); StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, suspend_id); @@ -3115,59 +3008,66 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) { Node* offset = SmiTag(BytecodeOffset()); StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset, offset); - Dispatch(); - BIND(&if_stepping); - { - Node* context = GetContext(); - CallRuntime(Runtime::kDebugRecordGenerator, context, generator); - Goto(&ok); - } + UpdateInterruptBudgetOnReturn(); + Return(GetAccumulator()); } -// RestoreGeneratorState <generator> +// SwitchOnGeneratorState <generator> <table_start> <table_length> // -// Loads the generator's state and stores it in the accumulator, -// before overwriting it with kGeneratorExecuting. -IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) { - Node* generator_reg = BytecodeOperandReg(0); - Node* generator = LoadRegister(generator_reg); +// If |generator| is undefined, falls through. Otherwise, loads the +// generator's state (overwriting it with kGeneratorExecuting), sets the context +// to the generator's resume context, and performs state dispatch on the +// generator's state by looking up the generator state in a jump table in the +// constant pool, starting at |table_start|, and of length |table_length|. 
+IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) { + Node* generator = LoadRegisterAtOperandIndex(0); + + Label fallthrough(this); + GotoIf(WordEqual(generator, UndefinedConstant()), &fallthrough); - Node* old_state = + Node* state = LoadObjectField(generator, JSGeneratorObject::kContinuationOffset); - Node* new_state = Int32Constant(JSGeneratorObject::kGeneratorExecuting); + Node* new_state = SmiConstant(JSGeneratorObject::kGeneratorExecuting); StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, - SmiTag(new_state)); - SetAccumulator(old_state); + new_state); + + Node* context = LoadObjectField(generator, JSGeneratorObject::kContextOffset); + SetContext(context); + + Node* table_start = BytecodeOperandIdx(1); + // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't + // actually need it otherwise. + Node* table_length = BytecodeOperandUImmWord(2); + + // The state must be a Smi. + CSA_ASSERT(this, TaggedIsSmi(state)); + Node* case_value = SmiUntag(state); + + CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0))); + CSA_ASSERT(this, IntPtrLessThan(case_value, table_length)); + USE(table_length); + + Node* entry = IntPtrAdd(table_start, case_value); + Node* relative_jump = LoadAndUntagConstantPoolEntry(entry); + Jump(relative_jump); + + BIND(&fallthrough); Dispatch(); } -// ResumeGenerator <generator> <generator_state> <first output -// register> <register count> +// ResumeGenerator <generator> <first output register> <register count> // // Imports the register file stored in the generator and marks the generator // state as executing. IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) { - Node* generator_reg = BytecodeOperandReg(0); - Node* generator_state_reg = BytecodeOperandReg(1); - // Bytecode operand 2 is the start register. It should always be 0, so let's - // ignore it. 
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(2), - IntPtrConstant(Register(0).ToOperand()))); - // Bytecode operand 3 is the number of registers to store to the generator. - Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(3)); - - Node* generator = LoadRegister(generator_reg); + Node* generator = LoadRegisterAtOperandIndex(0); + RegListNodePair registers = GetRegisterListAtOperandIndex(1); ImportRegisterFile( LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset), - register_count); - - // Since we're resuming, update the generator state to indicate that the - // generator is now executing. - StoreRegister(SmiConstant(JSGeneratorObject::kGeneratorExecuting), - generator_state_reg); + registers); // Return the generator's input_or_debug_pos in the accumulator. SetAccumulator( diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc index 7ad8d49b63..e44289bb6c 100644 --- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc +++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc @@ -7,6 +7,7 @@ #include "src/allocation.h" #include "src/builtins/builtins.h" #include "src/code-factory.h" +#include "src/factory-inl.h" #include "src/frames.h" #include "src/interpreter/bytecodes.h" #include "src/interpreter/interpreter-assembler.h" @@ -27,8 +28,8 @@ class IntrinsicsGenerator { zone_(assembler->zone()), assembler_(assembler) {} - Node* InvokeIntrinsic(Node* function_id, Node* context, Node* first_arg_reg, - Node* arg_count); + Node* InvokeIntrinsic(Node* function_id, Node* context, + const InterpreterAssembler::RegListNodePair& args); private: enum InstanceTypeCompareMode { @@ -38,18 +39,21 @@ class IntrinsicsGenerator { Node* IsInstanceType(Node* input, int type); Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode); - Node* IntrinsicAsStubCall(Node* input, Node* context, - Callable const& callable); - Node* 
IntrinsicAsBuiltinCall(Node* input, Node* context, Builtins::Name name); + Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args, + Node* context, Callable const& callable); + Node* IntrinsicAsBuiltinCall( + const InterpreterAssembler::RegListNodePair& args, Node* context, + Builtins::Name name); void AbortIfArgCountMismatch(int expected, compiler::Node* actual); #define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \ - Node* name(Node* input, Node* arg_count, Node* context); + Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context); INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER) #undef DECLARE_INTRINSIC_HELPER Isolate* isolate() { return isolate_; } Zone* zone() { return zone_; } + Factory* factory() { return isolate()->factory(); } Isolate* isolate_; Zone* zone_; @@ -58,19 +62,18 @@ class IntrinsicsGenerator { DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator); }; -Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler, - Node* function_id, Node* context, - Node* first_arg_reg, Node* arg_count) { +Node* GenerateInvokeIntrinsic( + InterpreterAssembler* assembler, Node* function_id, Node* context, + const InterpreterAssembler::RegListNodePair& args) { IntrinsicsGenerator generator(assembler); - return generator.InvokeIntrinsic(function_id, context, first_arg_reg, - arg_count); + return generator.InvokeIntrinsic(function_id, context, args); } #define __ assembler_-> -Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context, - Node* first_arg_reg, - Node* arg_count) { +Node* IntrinsicsGenerator::InvokeIntrinsic( + Node* function_id, Node* context, + const InterpreterAssembler::RegListNodePair& args) { InterpreterAssembler::Label abort(assembler_), end(assembler_); InterpreterAssembler::Variable result(assembler_, MachineRepresentation::kTagged); @@ -90,17 +93,17 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context, #undef CASE __ Switch(function_id, &abort, cases, labels, 
arraysize(cases)); -#define HANDLE_CASE(name, lower_case, expected_arg_count) \ - __ BIND(&lower_case); \ - { \ - if (FLAG_debug_code && expected_arg_count >= 0) { \ - AbortIfArgCountMismatch(expected_arg_count, arg_count); \ - } \ - Node* value = name(first_arg_reg, arg_count, context); \ - if (value) { \ - result.Bind(value); \ - __ Goto(&end); \ - } \ +#define HANDLE_CASE(name, lower_case, expected_arg_count) \ + __ BIND(&lower_case); \ + { \ + if (FLAG_debug_code && expected_arg_count >= 0) { \ + AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \ + } \ + Node* value = name(args, context); \ + if (value) { \ + result.Bind(value); \ + __ Goto(&end); \ + } \ } INTRINSICS_LIST(HANDLE_CASE) #undef HANDLE_CASE @@ -129,238 +132,195 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type, } Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) { - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - // TODO(ishell): Use Select here. - InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_), - return_false(assembler_), end(assembler_); - Node* arg = __ LoadRegister(input); - __ GotoIf(__ TaggedIsSmi(arg), &return_false); - - Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual); - __ Branch(condition, &return_true, &return_false); - - __ BIND(&return_true); - { - return_value.Bind(__ TrueConstant()); - __ Goto(&end); - } - - __ BIND(&return_false); - { - return_value.Bind(__ FalseConstant()); - __ Goto(&end); - } - - __ BIND(&end); - return return_value.value(); -} - -Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count, - Node* context) { - // TODO(ishell): Use Select here. - // TODO(ishell): Use CSA::IsJSReceiverInstanceType here. 
- InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - InterpreterAssembler::Label return_true(assembler_), return_false(assembler_), - end(assembler_); - - Node* arg = __ LoadRegister(input); - __ GotoIf(__ TaggedIsSmi(arg), &return_false); - - STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE, - kInstanceTypeGreaterThanOrEqual); - __ Branch(condition, &return_true, &return_false); - - __ BIND(&return_true); - { - return_value.Bind(__ TrueConstant()); - __ Goto(&end); - } - - __ BIND(&return_false); - { - return_value.Bind(__ FalseConstant()); - __ Goto(&end); - } - - __ BIND(&end); - return return_value.value(); -} - -Node* IntrinsicsGenerator::IsArray(Node* input, Node* arg_count, - Node* context) { + Node* result = + __ Select(__ TaggedIsSmi(input), [=] { return __ FalseConstant(); }, + [=] { + return __ SelectBooleanConstant( + CompareInstanceType(input, type, kInstanceTypeEqual)); + }, + MachineRepresentation::kTagged); + return result; +} + +Node* IntrinsicsGenerator::IsJSReceiver( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); + Node* result = __ Select( + __ TaggedIsSmi(input), [=] { return __ FalseConstant(); }, + [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); }, + MachineRepresentation::kTagged); + return result; +} + +Node* IntrinsicsGenerator::IsArray( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_ARRAY_TYPE); } -Node* IntrinsicsGenerator::IsJSProxy(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsJSProxy( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_PROXY_TYPE); } -Node* 
IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsTypedArray( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_TYPED_ARRAY_TYPE); } -Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsJSMap( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_MAP_TYPE); } -Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsJSSet( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_SET_TYPE); } -Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsJSWeakMap( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_WEAK_MAP_TYPE); } -Node* IntrinsicsGenerator::IsJSWeakSet(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::IsJSWeakSet( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_WEAK_SET_TYPE); } -Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) { - // TODO(ishell): Use SelectBooleanConstant here. 
- InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_), - end(assembler_); - - Node* arg = __ LoadRegister(input); - - __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi); - __ BIND(&if_smi); - { - return_value.Bind(__ TrueConstant()); - __ Goto(&end); - } - - __ BIND(&if_not_smi); - { - return_value.Bind(__ FalseConstant()); - __ Goto(&end); - } - - __ BIND(&end); - return return_value.value(); +Node* IntrinsicsGenerator::IsSmi( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* input = __ LoadRegisterFromRegisterList(args, 0); + return __ SelectBooleanConstant(__ TaggedIsSmi(input)); } -Node* IntrinsicsGenerator::IntrinsicAsStubCall(Node* args_reg, Node* context, - Callable const& callable) { +Node* IntrinsicsGenerator::IntrinsicAsStubCall( + const InterpreterAssembler::RegListNodePair& args, Node* context, + Callable const& callable) { int param_count = callable.descriptor().GetParameterCount(); int input_count = param_count + 2; // +2 for target and context - Node** args = zone()->NewArray<Node*>(input_count); + Node** stub_args = zone()->NewArray<Node*>(input_count); int index = 0; - args[index++] = __ HeapConstant(callable.code()); + stub_args[index++] = __ HeapConstant(callable.code()); for (int i = 0; i < param_count; i++) { - args[index++] = __ LoadRegister(args_reg); - args_reg = __ NextRegister(args_reg); + stub_args[index++] = __ LoadRegisterFromRegisterList(args, i); } - args[index++] = context; - return __ CallStubN(callable.descriptor(), 1, input_count, args); + stub_args[index++] = context; + return __ CallStubN(callable.descriptor(), 1, input_count, stub_args); } -Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(Node* input, Node* context, - Builtins::Name name) { +Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall( + const InterpreterAssembler::RegListNodePair& args, Node* context, + Builtins::Name name) { 
Callable callable = Builtins::CallableFor(isolate_, name); - return IntrinsicAsStubCall(input, context, callable); + return IntrinsicAsStubCall(args, context, callable); } -Node* IntrinsicsGenerator::CreateIterResultObject(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::CreateIterResultObject( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, + args, context, Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject)); } -Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::HasProperty( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty)); + args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty)); } -Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::RejectPromise( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kToString)); + args, context, + Builtins::CallableFor(isolate(), Builtins::kRejectPromise)); } -Node* IntrinsicsGenerator::ToLength(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::ResolvePromise( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kToLength)); + args, context, + Builtins::CallableFor(isolate(), Builtins::kResolvePromise)); } -Node* IntrinsicsGenerator::ToInteger(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::ToString( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kToInteger)); + args, context, 
Builtins::CallableFor(isolate(), Builtins::kToString)); } -Node* IntrinsicsGenerator::ToNumber(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::ToLength( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kToNumber)); + args, context, Builtins::CallableFor(isolate(), Builtins::kToLength)); } -Node* IntrinsicsGenerator::ToObject(Node* input, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::ToInteger( + const InterpreterAssembler::RegListNodePair& args, Node* context) { return IntrinsicAsStubCall( - input, context, Builtins::CallableFor(isolate(), Builtins::kToObject)); + args, context, Builtins::CallableFor(isolate(), Builtins::kToInteger)); } -Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count, - Node* context) { - // First argument register contains the function target. - Node* function = __ LoadRegister(args_reg); +Node* IntrinsicsGenerator::ToNumber( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsStubCall( + args, context, Builtins::CallableFor(isolate(), Builtins::kToNumber)); +} - // Receiver is the second runtime call argument. - Node* receiver_reg = __ NextRegister(args_reg); - Node* receiver_arg = __ RegisterLocation(receiver_reg); +Node* IntrinsicsGenerator::ToObject( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsStubCall( + args, context, Builtins::CallableFor(isolate(), Builtins::kToObject)); +} - // Subtract function and receiver from arg count. - Node* function_and_receiver_count = __ Int32Constant(2); - Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count); +Node* IntrinsicsGenerator::Call( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + // First argument register contains the function target. 
+ Node* function = __ LoadRegisterFromRegisterList(args, 0); + + // The arguments for the target function are from the second runtime call + // argument. + InterpreterAssembler::RegListNodePair target_args( + __ RegisterLocationInRegisterList(args, 1), + __ Int32Sub(args.reg_count(), __ Int32Constant(1))); if (FLAG_debug_code) { InterpreterAssembler::Label arg_count_positive(assembler_); - Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0)); + Node* comparison = + __ Int32LessThan(target_args.reg_count(), __ Int32Constant(0)); __ GotoIfNot(comparison, &arg_count_positive); __ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic); __ Goto(&arg_count_positive); __ BIND(&arg_count_positive); } - __ CallJSAndDispatch(function, context, receiver_arg, target_args_count, + __ CallJSAndDispatch(function, context, target_args, ConvertReceiverMode::kAny); return nullptr; // We never return from the CallJSAndDispatch above. } -Node* IntrinsicsGenerator::ClassOf(Node* args_reg, Node* arg_count, - Node* context) { - Node* value = __ LoadRegister(args_reg); - return __ ClassOf(value); -} - -Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg, - Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator( + const InterpreterAssembler::RegListNodePair& args, Node* context) { InterpreterAssembler::Label not_receiver( assembler_, InterpreterAssembler::Label::kDeferred); InterpreterAssembler::Label done(assembler_); InterpreterAssembler::Variable return_value(assembler_, MachineRepresentation::kTagged); - Node* sync_iterator = __ LoadRegister(args_reg); + Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0); __ GotoIf(__ TaggedIsSmi(sync_iterator), ¬_receiver); __ GotoIfNot(__ IsJSReceiver(sync_iterator), ¬_receiver); + Node* const next = + __ GetProperty(context, sync_iterator, factory()->next_string()); + Node* const native_context = __ LoadNativeContext(context); Node* const map = __ 
LoadContextElement( native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX); @@ -368,6 +328,8 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg, __ StoreObjectFieldNoWriteBarrier( iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator); + __ StoreObjectFieldNoWriteBarrier(iterator, + JSAsyncFromSyncIterator::kNextOffset, next); return_value.Bind(iterator); __ Goto(&done); @@ -385,52 +347,41 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg, return return_value.value(); } -Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsBuiltinCall(input, context, +Node* IntrinsicsGenerator::CreateJSGeneratorObject( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, Builtins::kCreateGeneratorObject); } -Node* IntrinsicsGenerator::GeneratorGetContext(Node* args_reg, Node* arg_count, - Node* context) { - Node* generator = __ LoadRegister(args_reg); - Node* const value = - __ LoadObjectField(generator, JSGeneratorObject::kContextOffset); - - return value; -} - -Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(Node* args_reg, - Node* arg_count, - Node* context) { - Node* generator = __ LoadRegister(args_reg); +Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* generator = __ LoadRegisterFromRegisterList(args, 0); Node* const value = __ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset); return value; } -Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg, - Node* arg_count, - Node* context) { - Node* generator = __ LoadRegister(args_reg); +Node* IntrinsicsGenerator::GeneratorGetResumeMode( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* generator = __ LoadRegisterFromRegisterList(args, 0); Node* const value = __ LoadObjectField(generator, 
JSGeneratorObject::kResumeModeOffset); return value; } -Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count, - Node* context) { - Node* generator = __ LoadRegister(args_reg); +Node* IntrinsicsGenerator::GeneratorClose( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + Node* generator = __ LoadRegisterFromRegisterList(args, 0); __ StoreObjectFieldNoWriteBarrier( generator, JSGeneratorObject::kContinuationOffset, __ SmiConstant(JSGeneratorObject::kGeneratorClosed)); return __ UndefinedConstant(); } -Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count, - Node* context) { +Node* IntrinsicsGenerator::GetImportMetaObject( + const InterpreterAssembler::RegListNodePair& args, Node* context) { Node* const module_context = __ LoadModuleContext(context); Node* const module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); @@ -451,21 +402,44 @@ Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count, return return_value.value(); } -Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsBuiltinCall(input, context, - Builtins::kAsyncGeneratorReject); +Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, + Builtins::kAsyncFunctionAwaitCaught); +} + +Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, + Builtins::kAsyncFunctionAwaitUncaught); +} + +Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, + Builtins::kAsyncGeneratorAwaitCaught); +} + +Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught( + const InterpreterAssembler::RegListNodePair& args, Node* 
context) { + return IntrinsicAsBuiltinCall(args, context, + Builtins::kAsyncGeneratorAwaitUncaught); +} + +Node* IntrinsicsGenerator::AsyncGeneratorReject( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject); } -Node* IntrinsicsGenerator::AsyncGeneratorResolve(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsBuiltinCall(input, context, +Node* IntrinsicsGenerator::AsyncGeneratorResolve( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorResolve); } -Node* IntrinsicsGenerator::AsyncGeneratorYield(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsBuiltinCall(input, context, Builtins::kAsyncGeneratorYield); +Node* IntrinsicsGenerator::AsyncGeneratorYield( + const InterpreterAssembler::RegListNodePair& args, Node* context) { + return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield); } void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) { diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h index 11442438d5..fd4e167ed0 100644 --- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h +++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h @@ -5,6 +5,8 @@ #ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_ #define V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_ +#include "src/interpreter/interpreter-assembler.h" + namespace v8 { namespace internal { @@ -14,13 +16,9 @@ class Node; namespace interpreter { -class InterpreterAssembler; - -extern compiler::Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler, - compiler::Node* function_id, - compiler::Node* context, - compiler::Node* first_arg_reg, - compiler::Node* arg_count); +extern compiler::Node* GenerateInvokeIntrinsic( + 
InterpreterAssembler* assembler, compiler::Node* function_id, + compiler::Node* context, const InterpreterAssembler::RegListNodePair& args); } // namespace interpreter } // namespace internal diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h index b9137c8559..6cdfec2d04 100644 --- a/deps/v8/src/interpreter/interpreter-intrinsics.h +++ b/deps/v8/src/interpreter/interpreter-intrinsics.h @@ -14,17 +14,19 @@ namespace interpreter { // List of supported intrisics, with upper case name, lower case name and // expected number of arguments (-1 denoting argument count is variable). #define INTRINSICS_LIST(V) \ + V(AsyncFunctionAwaitCaught, async_function_await_caught, 3) \ + V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 3) \ + V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \ + V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \ V(AsyncGeneratorReject, async_generator_reject, 2) \ V(AsyncGeneratorResolve, async_generator_resolve, 3) \ V(AsyncGeneratorYield, async_generator_yield, 3) \ V(CreateJSGeneratorObject, create_js_generator_object, 2) \ - V(GeneratorGetContext, generator_get_context, 1) \ V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \ V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \ V(GeneratorClose, generator_close, 1) \ V(GetImportMetaObject, get_import_meta_object, 0) \ V(Call, call, -1) \ - V(ClassOf, class_of, 1) \ V(CreateIterResultObject, create_iter_result_object, 2) \ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \ V(HasProperty, has_property, 2) \ @@ -37,6 +39,8 @@ namespace interpreter { V(IsJSWeakSet, is_js_weak_set, 1) \ V(IsSmi, is_smi, 1) \ V(IsTypedArray, is_typed_array, 1) \ + V(RejectPromise, reject_promise, 3) \ + V(ResolvePromise, resolve_promise, 2) \ V(ToString, to_string, 1) \ V(ToLength, to_length, 1) \ V(ToInteger, to_integer, 1) \ @@ -65,4 +69,4 @@ class IntrinsicsHelper { } // 
namespace internal } // namespace v8 -#endif +#endif // V8_INTERPRETER_INTERPRETER_INTRINSICS_H_ diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc index fb74d37df4..0702536b3d 100644 --- a/deps/v8/src/interpreter/interpreter.cc +++ b/deps/v8/src/interpreter/interpreter.cc @@ -29,7 +29,8 @@ namespace interpreter { class InterpreterCompilationJob final : public CompilationJob { public: InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal, - AccountingAllocator* allocator); + AccountingAllocator* allocator, + ZoneVector<FunctionLiteral*>* eager_inner_literals); protected: Status PrepareJobImpl(Isolate* isolate) final; @@ -66,11 +67,6 @@ Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler( if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code; DCHECK(FLAG_lazy_handler_deserialization); - if (FLAG_trace_lazy_deserialization) { - PrintF("Lazy-deserializing handler %s\n", - Bytecodes::ToString(bytecode, operand_scale).c_str()); - } - DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale)); code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale); @@ -123,13 +119,17 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) { ? 
nullptr : Code::GetCodeFromTargetAddress(code_entry); Object* old_code = code; - v->VisitRootPointer(Root::kDispatchTable, &code); + v->VisitRootPointer(Root::kDispatchTable, nullptr, &code); if (code != old_code) { dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry(); } } } +int Interpreter::InterruptBudget() { + return FLAG_interrupt_budget; +} + namespace { void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) { @@ -163,12 +163,14 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) { InterpreterCompilationJob::InterpreterCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, - AccountingAllocator* allocator) + AccountingAllocator* allocator, + ZoneVector<FunctionLiteral*>* eager_inner_literals) : CompilationJob(parse_info->stack_limit(), parse_info, &compilation_info_, "Ignition", State::kReadyToExecute), zone_(allocator, ZONE_NAME), compilation_info_(&zone_, parse_info, literal), - generator_(&compilation_info_, parse_info->ast_string_constants()) {} + generator_(&compilation_info_, parse_info->ast_string_constants(), + eager_inner_literals) {} InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl( Isolate* isolate) { @@ -226,10 +228,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl( return SUCCEEDED; } -CompilationJob* Interpreter::NewCompilationJob(ParseInfo* parse_info, - FunctionLiteral* literal, - AccountingAllocator* allocator) { - return new InterpreterCompilationJob(parse_info, literal, allocator); +CompilationJob* Interpreter::NewCompilationJob( + ParseInfo* parse_info, FunctionLiteral* literal, + AccountingAllocator* allocator, + ZoneVector<FunctionLiteral*>* eager_inner_literals) { + return new InterpreterCompilationJob(parse_info, literal, allocator, + eager_inner_literals); } bool Interpreter::IsDispatchTableInitialized() const { diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h index 
7e6d013a29..83dfea89f9 100644 --- a/deps/v8/src/interpreter/interpreter.h +++ b/deps/v8/src/interpreter/interpreter.h @@ -27,6 +27,8 @@ class FunctionLiteral; class ParseInfo; class RootVisitor; class SetupIsolateDelegate; +template <typename> +class ZoneVector; namespace interpreter { @@ -37,10 +39,16 @@ class Interpreter { explicit Interpreter(Isolate* isolate); virtual ~Interpreter() {} + // Returns the interrupt budget which should be used for the profiler counter. + static int InterruptBudget(); + // Creates a compilation job which will generate bytecode for |literal|. - static CompilationJob* NewCompilationJob(ParseInfo* parse_info, - FunctionLiteral* literal, - AccountingAllocator* allocator); + // Additionally, if |eager_inner_literals| is not null, adds any eagerly + // compilable inner FunctionLiterals to this list. + static CompilationJob* NewCompilationJob( + ParseInfo* parse_info, FunctionLiteral* literal, + AccountingAllocator* allocator, + ZoneVector<FunctionLiteral*>* eager_inner_literals); // If the bytecode handler for |bytecode| and |operand_scale| has not yet // been loaded, deserialize it. Then return the handler. @@ -72,9 +80,6 @@ class Interpreter { return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get()); } - // The interrupt budget which should be used for the profiler counter. - static const int kInterruptBudget = 144 * KB; - private: friend class SetupInterpreter; friend class v8::internal::SetupIsolateDelegate; |