diff options
author | Ujjwal Sharma <usharma1998@gmail.com> | 2019-03-15 18:35:06 +0530 |
---|---|---|
committer | Refael Ackermann <refack@gmail.com> | 2019-03-28 16:36:18 -0400 |
commit | f579e1194046c50f2e6bb54348d48c8e7d1a53cf (patch) | |
tree | 9125787c758358365f74f9fd9673c14f57e67870 /deps/v8/src/interpreter | |
parent | 2c73868b0471fbd4038f500d076df056cbf697fe (diff) | |
download | android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.tar.gz android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.tar.bz2 android-node-v8-f579e1194046c50f2e6bb54348d48c8e7d1a53cf.zip |
deps: update V8 to 7.4.288.13
PR-URL: https://github.com/nodejs/node/pull/26685
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r-- | deps/v8/src/interpreter/bytecode-array-builder.cc | 109 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-array-builder.h | 26 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-array-writer.cc | 181 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-array-writer.h | 15 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-generator.cc | 88 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-generator.h | 3 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-label.cc | 9 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-label.h | 83 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-register-optimizer.h | 3 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecode-register.h | 2 | ||||
-rw-r--r-- | deps/v8/src/interpreter/bytecodes.h | 2 | ||||
-rw-r--r-- | deps/v8/src/interpreter/control-flow-builders.cc | 4 | ||||
-rw-r--r-- | deps/v8/src/interpreter/control-flow-builders.h | 3 | ||||
-rw-r--r-- | deps/v8/src/interpreter/interpreter-assembler.cc | 30 | ||||
-rw-r--r-- | deps/v8/src/interpreter/interpreter-generator.cc | 123 |
15 files changed, 342 insertions, 339 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc index 2183068576..d0a30349ca 100644 --- a/deps/v8/src/interpreter/bytecode-array-builder.cc +++ b/deps/v8/src/interpreter/bytecode-array-builder.cc @@ -47,7 +47,6 @@ BytecodeArrayBuilder::BytecodeArrayBuilder( bytecode_generated_(false), constant_array_builder_(zone), handler_table_builder_(zone), - return_seen_in_block_(false), parameter_count_(parameter_count), local_register_count_(locals_count), register_allocator_(fixed_register_count()), @@ -82,7 +81,7 @@ Register BytecodeArrayBuilder::Local(int index) const { } Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) { - DCHECK(return_seen_in_block_); + DCHECK(RemainderOfBlockIsDead()); DCHECK(!bytecode_generated_); bytecode_generated_ = true; @@ -146,6 +145,12 @@ void BytecodeArrayBuilder::WriteJump(BytecodeNode* node, BytecodeLabel* label) { bytecode_array_writer_.WriteJump(node, label); } +void BytecodeArrayBuilder::WriteJumpLoop(BytecodeNode* node, + BytecodeLoopHeader* loop_header) { + AttachOrEmitDeferredSourceInfo(node); + bytecode_array_writer_.WriteJumpLoop(node, loop_header); +} + void BytecodeArrayBuilder::WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table) { AttachOrEmitDeferredSourceInfo(node); @@ -330,21 +335,25 @@ class BytecodeNodeBuilder { template <typename... Operands> \ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \ Operands... 
operands) { \ - DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \ + DCHECK(Bytecodes::IsForwardJump(Bytecode::k##name)); \ BytecodeNode node(Create##name##Node(operands...)); \ WriteJump(&node, label); \ - LeaveBasicBlock(); \ } BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT) #undef DEFINE_BYTECODE_OUTPUT +void BytecodeArrayBuilder::OutputJumpLoop(BytecodeLoopHeader* loop_header, + int loop_depth) { + BytecodeNode node(CreateJumpLoopNode(0, loop_depth)); + WriteJumpLoop(&node, loop_header); +} + void BytecodeArrayBuilder::OutputSwitchOnSmiNoFeedback( BytecodeJumpTable* jump_table) { BytecodeNode node(CreateSwitchOnSmiNoFeedbackNode( jump_table->constant_pool_index(), jump_table->size(), jump_table->case_value_base())); WriteSwitch(&node, jump_table); - LeaveBasicBlock(); } BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op, @@ -506,17 +515,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation( case Token::Value::INSTANCEOF: OutputTestInstanceOf(reg, feedback_slot); break; - default: - UNREACHABLE(); - } - return *this; -} - -BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op, - Register reg) { - switch (op) { case Token::Value::IN: - OutputTestIn(reg); + OutputTestIn(reg, feedback_slot); break; default: UNREACHABLE(); @@ -1053,18 +1053,23 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumeric(int feedback_slot) { } BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) { + // Don't generate code for a label which hasn't had a corresponding forward + // jump generated already. For backwards jumps, use BindLoopHeader. + if (!label->has_referrer_jump()) return *this; + // Flush the register optimizer when binding a label to ensure all // expected registers are valid when jumping to this label. 
if (register_optimizer_) register_optimizer_->Flush(); bytecode_array_writer_.BindLabel(label); - LeaveBasicBlock(); return *this; } -BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target, - BytecodeLabel* label) { - bytecode_array_writer_.BindLabel(target, label); - LeaveBasicBlock(); +BytecodeArrayBuilder& BytecodeArrayBuilder::Bind( + BytecodeLoopHeader* loop_header) { + // Flush the register optimizer when starting a loop to ensure all expected + // registers are valid when jumping to the loop header. + if (register_optimizer_) register_optimizer_->Flush(); + bytecode_array_writer_.BindLoopHeader(loop_header); return *this; } @@ -1074,7 +1079,33 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeJumpTable* jump_table, // all expected registers are valid when jumping to this location. if (register_optimizer_) register_optimizer_->Flush(); bytecode_array_writer_.BindJumpTableEntry(jump_table, case_value); - LeaveBasicBlock(); + return *this; +} + +BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler( + int handler_id, HandlerTable::CatchPrediction catch_prediction) { + // The handler starts a new basic block, and any reasonable try block won't + // let control fall through into it. + DCHECK_IMPLIES(register_optimizer_, + register_optimizer_->EnsureAllRegistersAreFlushed()); + bytecode_array_writer_.BindHandlerTarget(handler_table_builder(), handler_id); + handler_table_builder()->SetPrediction(handler_id, catch_prediction); + return *this; +} + +BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id, + Register context) { + // Flush registers to make sure everything visible to the handler is + // materialized. 
+ if (register_optimizer_) register_optimizer_->Flush(); + bytecode_array_writer_.BindTryRegionStart(handler_table_builder(), + handler_id); + handler_table_builder()->SetContextRegister(handler_id, context); + return *this; +} + +BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) { + bytecode_array_writer_.BindTryRegionEnd(handler_table_builder(), handler_id); return *this; } @@ -1178,10 +1209,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver( return *this; } -BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label, - int loop_depth) { - DCHECK(label->is_bound()); - OutputJumpLoop(label, 0, loop_depth); +BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop( + BytecodeLoopHeader* loop_header, int loop_depth) { + OutputJumpLoop(loop_header, loop_depth); return *this; } @@ -1233,7 +1263,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Abort(AbortReason reason) { BytecodeArrayBuilder& BytecodeArrayBuilder::Return() { OutputReturn(); - return_seen_in_block_ = true; return *this; } @@ -1321,7 +1350,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnGeneratorState( BytecodeNode node(CreateSwitchOnGeneratorStateNode( generator, jump_table->constant_pool_index(), jump_table->size())); WriteSwitch(&node, jump_table); - LeaveBasicBlock(); return *this; } @@ -1331,33 +1359,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator( return *this; } -BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler( - int handler_id, HandlerTable::CatchPrediction catch_prediction) { - BytecodeLabel handler; - Bind(&handler); - handler_table_builder()->SetHandlerTarget(handler_id, handler.offset()); - handler_table_builder()->SetPrediction(handler_id, catch_prediction); - return *this; -} - -BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id, - Register context) { - // TODO(leszeks): Do we need to start a new basic block here? 
Could we simply - // get the current bytecode offset from the array writer instead? - BytecodeLabel try_begin; - Bind(&try_begin); - handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset()); - handler_table_builder()->SetContextRegister(handler_id, context); - return *this; -} - -BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) { - BytecodeLabel try_end; - Bind(&try_end); - handler_table_builder()->SetTryRegionEnd(handler_id, try_end.offset()); - return *this; -} - BytecodeArrayBuilder& BytecodeArrayBuilder::CallProperty(Register callable, RegisterList args, int feedback_slot) { diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h index d362ffffa4..93d108f7be 100644 --- a/deps/v8/src/interpreter/bytecode-array-builder.h +++ b/deps/v8/src/interpreter/bytecode-array-builder.h @@ -27,6 +27,7 @@ class Isolate; namespace interpreter { class BytecodeLabel; +class BytecodeLoopHeader; class BytecodeNode; class BytecodeRegisterOptimizer; class BytecodeJumpTable; @@ -379,7 +380,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { // Tests. BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg, int feedback_slot); - BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg); BytecodeArrayBuilder& CompareReference(Register reg); BytecodeArrayBuilder& CompareUndetectable(); BytecodeArrayBuilder& CompareUndefined(); @@ -397,13 +397,20 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { BytecodeArrayBuilder& ToNumber(int feedback_slot); BytecodeArrayBuilder& ToNumeric(int feedback_slot); + // Exception handling. + BytecodeArrayBuilder& MarkHandler(int handler_id, + HandlerTable::CatchPrediction will_catch); + BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context); + BytecodeArrayBuilder& MarkTryEnd(int handler_id); + // Flow Control. 
BytecodeArrayBuilder& Bind(BytecodeLabel* label); - BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label); + BytecodeArrayBuilder& Bind(BytecodeLoopHeader* label); BytecodeArrayBuilder& Bind(BytecodeJumpTable* jump_table, int case_value); BytecodeArrayBuilder& Jump(BytecodeLabel* label); - BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth); + BytecodeArrayBuilder& JumpLoop(BytecodeLoopHeader* loop_header, + int loop_depth); BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label); BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label); @@ -458,12 +465,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { BytecodeArrayBuilder& ResumeGenerator(Register generator, RegisterList registers); - // Exception handling. - BytecodeArrayBuilder& MarkHandler(int handler_id, - HandlerTable::CatchPrediction will_catch); - BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context); - BytecodeArrayBuilder& MarkTryEnd(int handler_id); - // Creates a new handler table entry and returns a {hander_id} identifying the // entry, so that it can be referenced by above exception handling support. int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); } @@ -519,7 +520,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { } } - bool RequiresImplicitReturn() const { return !return_seen_in_block_; } bool RemainderOfBlockIsDead() const { return bytecode_array_writer_.RemainderOfBlockIsDead(); } @@ -568,6 +568,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT) #undef DECLARE_OPERAND_TYPE_INFO + V8_INLINE void OutputJumpLoop(BytecodeLoopHeader* loop_header, + int loop_depth); V8_INLINE void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table); bool RegisterIsValid(Register reg) const; @@ -583,6 +585,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { // Write bytecode to bytecode array. 
void Write(BytecodeNode* node); void WriteJump(BytecodeNode* node, BytecodeLabel* label); + void WriteJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header); void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* label); // Not implemented as the illegal bytecode is used inside internally @@ -593,8 +596,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { template <Bytecode bytecode, AccumulatorUse accumulator_use> void PrepareToOutputBytecode(); - void LeaveBasicBlock() { return_seen_in_block_ = false; } - BytecodeArrayWriter* bytecode_array_writer() { return &bytecode_array_writer_; } @@ -613,7 +614,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { bool bytecode_generated_; ConstantArrayBuilder constant_array_builder_; HandlerTableBuilder handler_table_builder_; - bool return_seen_in_block_; int parameter_count_; int local_register_count_; BytecodeRegisterAllocator register_allocator_; diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc index a563ff4fc3..05f655b71a 100644 --- a/deps/v8/src/interpreter/bytecode-array-writer.cc +++ b/deps/v8/src/interpreter/bytecode-array-writer.cc @@ -11,6 +11,7 @@ #include "src/interpreter/bytecode-register.h" #include "src/interpreter/bytecode-source-info.h" #include "src/interpreter/constant-array-builder.h" +#include "src/interpreter/handler-table-builder.h" #include "src/log.h" #include "src/objects-inl.h" @@ -45,16 +46,20 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray( int frame_size = register_count * kSystemPointerSize; Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray(isolate); - Handle<ByteArray> source_position_table = - source_position_table_builder()->ToSourcePositionTable(isolate); Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray( bytecode_size, &bytecodes()->front(), frame_size, parameter_count, constant_pool); bytecode_array->set_handler_table(*handler_table); - 
bytecode_array->set_source_position_table(*source_position_table); - LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent( - bytecode_array->GetFirstBytecodeAddress(), - *source_position_table)); + // TODO(v8:8510): Need to support native functions that should always have + // source positions suppressed and should write empty_byte_array here. + if (!source_position_table_builder_.Omit()) { + Handle<ByteArray> source_position_table = + source_position_table_builder()->ToSourcePositionTable(isolate); + bytecode_array->set_source_position_table(*source_position_table); + LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent( + bytecode_array->GetFirstBytecodeAddress(), + *source_position_table)); + } return bytecode_array; } @@ -70,10 +75,8 @@ void BytecodeArrayWriter::Write(BytecodeNode* node) { } void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) { - DCHECK(Bytecodes::IsJump(node->bytecode())); + DCHECK(Bytecodes::IsForwardJump(node->bytecode())); - // TODO(rmcilroy): For forward jumps we could also mark the label as dead, - // thereby avoiding emitting dead code when we bind the label. if (exit_seen_in_block_) return; // Don't emit dead code. UpdateExitSeenInBlock(node->bytecode()); MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid()); @@ -82,12 +85,22 @@ void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) { EmitJump(node, label); } +void BytecodeArrayWriter::WriteJumpLoop(BytecodeNode* node, + BytecodeLoopHeader* loop_header) { + DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop); + + if (exit_seen_in_block_) return; // Don't emit dead code. 
+ UpdateExitSeenInBlock(node->bytecode()); + MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid()); + + UpdateSourcePositionTable(node); + EmitJumpLoop(node, loop_header); +} + void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table) { DCHECK(Bytecodes::IsSwitch(node->bytecode())); - // TODO(rmcilroy): For jump tables we could also mark the table as dead, - // thereby avoiding emitting dead code when we bind the entries. if (exit_seen_in_block_) return; // Don't emit dead code. UpdateExitSeenInBlock(node->bytecode()); MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid()); @@ -97,30 +110,18 @@ void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node, } void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) { + DCHECK(label->has_referrer_jump()); size_t current_offset = bytecodes()->size(); - if (label->is_forward_target()) { - // An earlier jump instruction refers to this label. Update it's location. - PatchJump(current_offset, label->offset()); - // Now treat as if the label will only be back referred to. - } - label->bind_to(current_offset); - InvalidateLastBytecode(); - exit_seen_in_block_ = false; // Starting a new basic block. + // Update the jump instruction's location. + PatchJump(current_offset, label->jump_offset()); + label->bind(); + StartBasicBlock(); } -void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target, - BytecodeLabel* label) { - DCHECK(!label->is_bound()); - DCHECK(target.is_bound()); - if (label->is_forward_target()) { - // An earlier jump instruction refers to this label. Update it's location. - PatchJump(target.offset(), label->offset()); - // Now treat as if the label will only be back referred to. - } - label->bind_to(target.offset()); - InvalidateLastBytecode(); - // exit_seen_in_block_ was reset when target was bound, so shouldn't be - // changed here. 
+void BytecodeArrayWriter::BindLoopHeader(BytecodeLoopHeader* loop_header) { + size_t current_offset = bytecodes()->size(); + loop_header->bind_to(current_offset); + StartBasicBlock(); } void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table, @@ -135,8 +136,37 @@ void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table, Smi::FromInt(static_cast<int>(relative_jump))); jump_table->mark_bound(case_value); + StartBasicBlock(); +} + +void BytecodeArrayWriter::BindHandlerTarget( + HandlerTableBuilder* handler_table_builder, int handler_id) { + size_t current_offset = bytecodes()->size(); + StartBasicBlock(); + handler_table_builder->SetHandlerTarget(handler_id, current_offset); +} + +void BytecodeArrayWriter::BindTryRegionStart( + HandlerTableBuilder* handler_table_builder, int handler_id) { + size_t current_offset = bytecodes()->size(); + // Try blocks don't have to be in a separate basic block, but we do have to + // invalidate the bytecode to avoid eliding it and changing the offset. + InvalidateLastBytecode(); + handler_table_builder->SetTryRegionStart(handler_id, current_offset); +} + +void BytecodeArrayWriter::BindTryRegionEnd( + HandlerTableBuilder* handler_table_builder, int handler_id) { + // Try blocks don't have to be in a separate basic block, but we do have to + // invalidate the bytecode to avoid eliding it and changing the offset. + InvalidateLastBytecode(); + size_t current_offset = bytecodes()->size(); + handler_table_builder->SetTryRegionEnd(handler_id, current_offset); +} + +void BytecodeArrayWriter::StartBasicBlock() { InvalidateLastBytecode(); - exit_seen_in_block_ = false; // Starting a new basic block. 
+ exit_seen_in_block_ = false; } void BytecodeArrayWriter::UpdateSourcePositionTable( @@ -374,50 +404,57 @@ void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) { unbound_jumps_--; } +void BytecodeArrayWriter::EmitJumpLoop(BytecodeNode* node, + BytecodeLoopHeader* loop_header) { + DCHECK_EQ(node->bytecode(), Bytecode::kJumpLoop); + DCHECK_EQ(0u, node->operand(0)); + + size_t current_offset = bytecodes()->size(); + + CHECK_GE(current_offset, loop_header->offset()); + CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32)); + // Label has been bound already so this is a backwards jump. + uint32_t delta = + static_cast<uint32_t>(current_offset - loop_header->offset()); + OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta); + if (operand_scale > OperandScale::kSingle) { + // Adjust for scaling byte prefix for wide jump offset. + delta += 1; + } + node->update_operand0(delta); + EmitBytecode(node); +} + void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) { - DCHECK(Bytecodes::IsJump(node->bytecode())); + DCHECK(Bytecodes::IsForwardJump(node->bytecode())); DCHECK_EQ(0u, node->operand(0)); size_t current_offset = bytecodes()->size(); - if (label->is_bound()) { - CHECK_GE(current_offset, label->offset()); - CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32)); - // Label has been bound already so this is a backwards jump. - uint32_t delta = static_cast<uint32_t>(current_offset - label->offset()); - OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta); - if (operand_scale > OperandScale::kSingle) { - // Adjust for scaling byte prefix for wide jump offset. - delta += 1; - } - DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode()); - node->update_operand0(delta); - } else { - // The label has not yet been bound so this is a forward reference - // that will be patched when the label is bound. 
We create a - // reservation in the constant pool so the jump can be patched - // when the label is bound. The reservation means the maximum size - // of the operand for the constant is known and the jump can - // be emitted into the bytecode stream with space for the operand. - unbound_jumps_++; - label->set_referrer(current_offset); - OperandSize reserved_operand_size = - constant_array_builder()->CreateReservedEntry(); - DCHECK_NE(Bytecode::kJumpLoop, node->bytecode()); - switch (reserved_operand_size) { - case OperandSize::kNone: - UNREACHABLE(); - break; - case OperandSize::kByte: - node->update_operand0(k8BitJumpPlaceholder); - break; - case OperandSize::kShort: - node->update_operand0(k16BitJumpPlaceholder); - break; - case OperandSize::kQuad: - node->update_operand0(k32BitJumpPlaceholder); - break; - } + // The label has not yet been bound so this is a forward reference + // that will be patched when the label is bound. We create a + // reservation in the constant pool so the jump can be patched + // when the label is bound. The reservation means the maximum size + // of the operand for the constant is known and the jump can + // be emitted into the bytecode stream with space for the operand. 
+ unbound_jumps_++; + label->set_referrer(current_offset); + OperandSize reserved_operand_size = + constant_array_builder()->CreateReservedEntry(); + DCHECK_NE(Bytecode::kJumpLoop, node->bytecode()); + switch (reserved_operand_size) { + case OperandSize::kNone: + UNREACHABLE(); + break; + case OperandSize::kByte: + node->update_operand0(k8BitJumpPlaceholder); + break; + case OperandSize::kShort: + node->update_operand0(k16BitJumpPlaceholder); + break; + case OperandSize::kQuad: + node->update_operand0(k32BitJumpPlaceholder); + break; } EmitBytecode(node); } diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h index e6db2fce22..d18c62a90f 100644 --- a/deps/v8/src/interpreter/bytecode-array-writer.h +++ b/deps/v8/src/interpreter/bytecode-array-writer.h @@ -19,9 +19,11 @@ class SourcePositionTableBuilder; namespace interpreter { class BytecodeLabel; +class BytecodeLoopHeader; class BytecodeNode; class BytecodeJumpTable; class ConstantArrayBuilder; +class HandlerTableBuilder; namespace bytecode_array_writer_unittest { class BytecodeArrayWriterUnittest; @@ -37,10 +39,18 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final { void Write(BytecodeNode* node); void WriteJump(BytecodeNode* node, BytecodeLabel* label); + void WriteJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header); void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table); void BindLabel(BytecodeLabel* label); - void BindLabel(const BytecodeLabel& target, BytecodeLabel* label); + void BindLoopHeader(BytecodeLoopHeader* loop_header); void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value); + void BindHandlerTarget(HandlerTableBuilder* handler_table_builder, + int handler_id); + void BindTryRegionStart(HandlerTableBuilder* handler_table_builder, + int handler_id); + void BindTryRegionEnd(HandlerTableBuilder* handler_table_builder, + int handler_id); + Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int 
register_count, int parameter_count, Handle<ByteArray> handler_table); @@ -71,6 +81,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final { void EmitBytecode(const BytecodeNode* const node); void EmitJump(BytecodeNode* node, BytecodeLabel* label); + void EmitJumpLoop(BytecodeNode* node, BytecodeLoopHeader* loop_header); void EmitSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table); void UpdateSourcePositionTable(const BytecodeNode* const node); @@ -79,6 +90,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final { void MaybeElideLastBytecode(Bytecode next_bytecode, bool has_source_info); void InvalidateLastBytecode(); + void StartBasicBlock(); + ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; } SourcePositionTableBuilder* source_position_table_builder() { return &source_position_table_builder_; diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc index 00b1916c92..be142dbd17 100644 --- a/deps/v8/src/interpreter/bytecode-generator.cc +++ b/deps/v8/src/interpreter/bytecode-generator.cc @@ -18,6 +18,7 @@ #include "src/objects/debug-objects.h" #include "src/objects/literal-objects-inl.h" #include "src/objects/smi.h" +#include "src/objects/template-objects-inl.h" #include "src/parsing/parse-info.h" #include "src/parsing/token.h" #include "src/unoptimized-compilation-info.h" @@ -958,8 +959,7 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode( #ifdef DEBUG // Unoptimized compilation should be context-independent. Verify that we don't // access the native context by nulling it out during finalization. - SaveContext save(isolate); - isolate->set_context(Context()); + SaveAndSwitchContext save(isolate, Context()); #endif AllocateDeferredConstants(isolate, script); @@ -1093,7 +1093,7 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) { } // Check that we are not falling off the end. 
- DCHECK(!builder()->RequiresImplicitReturn()); + DCHECK(builder()->RemainderOfBlockIsDead()); } void BytecodeGenerator::GenerateBytecodeBody() { @@ -1153,7 +1153,7 @@ void BytecodeGenerator::GenerateBytecodeBody() { // Emit an implicit return instruction in case control flow can fall off the // end of the function without an explicit return being present on all paths. - if (builder()->RequiresImplicitReturn()) { + if (!builder()->RemainderOfBlockIsDead()) { builder()->LoadUndefined(); BuildReturn(); } @@ -1221,6 +1221,9 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) { void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) { Variable* variable = decl->var(); + // Unused variables don't need to be visited. + if (!variable->is_used()) return; + switch (variable->location()) { case VariableLocation::UNALLOCATED: { DCHECK(!variable->binding_needs_init()); @@ -1275,6 +1278,9 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) { DCHECK(variable->mode() == VariableMode::kLet || variable->mode() == VariableMode::kVar || variable->mode() == VariableMode::kDynamic); + // Unused variables don't need to be visited. 
+ if (!variable->is_used()) return; + switch (variable->location()) { case VariableLocation::UNALLOCATED: { FeedbackSlot slot = @@ -2343,7 +2349,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForEffect(property->value()); } } else { - RegisterList args = register_allocator()->NewRegisterList(4); + RegisterList args = register_allocator()->NewRegisterList(3); builder()->MoveRegister(literal, args[0]); builder()->SetExpressionPosition(property->key()); @@ -2351,10 +2357,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { builder()->SetExpressionPosition(property->value()); VisitForRegisterValue(property->value(), args[2]); if (property->emit_store()) { - builder() - ->LoadLiteral(Smi::FromEnum(LanguageMode::kSloppy)) - .StoreAccumulatorInRegister(args[3]) - .CallRuntime(Runtime::kSetKeyedProperty, args); + builder()->CallRuntime(Runtime::kSetKeyedProperty, args); Register value = args[2]; VisitSetHomeObject(value, literal, property); } @@ -3106,7 +3109,8 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs( register_allocator()->NewRegisterList(4); SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); - VisitForRegisterValue(super_property->this_var(), super_property_args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(super_property_args[0]); VisitForRegisterValue(super_property->home_object(), super_property_args[1]); builder() @@ -3120,7 +3124,8 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs( register_allocator()->NewRegisterList(4); SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); - VisitForRegisterValue(super_property->this_var(), super_property_args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(super_property_args[0]); VisitForRegisterValue(super_property->home_object(), super_property_args[1]); VisitForRegisterValue(property->key(), 
super_property_args[2]); @@ -3639,13 +3644,13 @@ void BytecodeGenerator::BuildAssignment( case NAMED_SUPER_PROPERTY: { builder() ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3]) - .CallRuntime(StoreToSuperRuntimeId(), lhs_data.super_property_args()); + .CallRuntime(Runtime::kStoreToSuper, lhs_data.super_property_args()); break; } case KEYED_SUPER_PROPERTY: { builder() ->StoreAccumulatorInRegister(lhs_data.super_property_args()[3]) - .CallRuntime(StoreKeyedToSuperRuntimeId(), + .CallRuntime(Runtime::kStoreKeyedToSuper, lhs_data.super_property_args()); break; } @@ -4163,7 +4168,8 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property, SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); RegisterList args = register_allocator()->NewRegisterList(3); - VisitForRegisterValue(super_property->this_var(), args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(args[0]); VisitForRegisterValue(super_property->home_object(), args[1]); builder()->SetExpressionPosition(property); @@ -4183,7 +4189,8 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property, SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); RegisterList args = register_allocator()->NewRegisterList(3); - VisitForRegisterValue(super_property->this_var(), args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(args[0]); VisitForRegisterValue(super_property->home_object(), args[1]); VisitForRegisterValue(property->key(), args[2]); @@ -4451,8 +4458,8 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) { // Default constructors don't need have to do the assignment because // 'this' isn't accessed in default constructors. 
if (!IsDefaultConstructor(info()->literal()->kind())) { - BuildVariableAssignment(super->this_var()->var(), Token::INIT, - HoleCheckMode::kRequired); + Variable* var = closure_scope()->GetReceiverScope()->receiver(); + BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kRequired); } // The derived constructor has the correct bit set always, so we @@ -4582,7 +4589,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) { Register object = VisitForRegisterValue(property->obj()); VisitForAccumulatorValue(property->key()); builder()->Delete(object, language_mode()); - } else if (expr->IsVariableProxy() && !expr->AsVariableProxy()->is_this() && + } else if (expr->IsVariableProxy() && !expr->AsVariableProxy()->is_new_target()) { // Delete of an unqualified identifier is allowed in sloppy mode but is // not allowed in strict mode. @@ -4665,7 +4672,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { RegisterList load_super_args = super_property_args.Truncate(3); SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); - VisitForRegisterValue(super_property->this_var(), load_super_args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(load_super_args[0]); VisitForRegisterValue(super_property->home_object(), load_super_args[1]); builder() ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName()) @@ -4678,7 +4686,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { RegisterList load_super_args = super_property_args.Truncate(3); SuperPropertyReference* super_property = property->obj()->AsSuperPropertyReference(); - VisitForRegisterValue(super_property->this_var(), load_super_args[0]); + BuildThisVariableLoad(); + builder()->StoreAccumulatorInRegister(load_super_args[0]); VisitForRegisterValue(super_property->home_object(), load_super_args[1]); VisitForRegisterValue(property->key(), load_super_args[2]); builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, 
load_super_args); @@ -4741,13 +4750,13 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_SUPER_PROPERTY: { builder() ->StoreAccumulatorInRegister(super_property_args[3]) - .CallRuntime(StoreToSuperRuntimeId(), super_property_args); + .CallRuntime(Runtime::kStoreToSuper, super_property_args); break; } case KEYED_SUPER_PROPERTY: { builder() ->StoreAccumulatorInRegister(super_property_args[3]) - .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args); + .CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args); break; } } @@ -4841,15 +4850,15 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) { Register lhs = VisitForRegisterValue(expr->left()); VisitForAccumulatorValue(expr->right()); builder()->SetExpressionPosition(expr); + FeedbackSlot slot; if (expr->op() == Token::IN) { - builder()->CompareOperation(expr->op(), lhs); + slot = feedback_spec()->AddKeyedHasICSlot(); } else if (expr->op() == Token::INSTANCEOF) { - FeedbackSlot slot = feedback_spec()->AddInstanceOfSlot(); - builder()->CompareOperation(expr->op(), lhs, feedback_index(slot)); + slot = feedback_spec()->AddInstanceOfSlot(); } else { - FeedbackSlot slot = feedback_spec()->AddCompareICSlot(); - builder()->CompareOperation(expr->op(), lhs, feedback_index(slot)); + slot = feedback_spec()->AddCompareICSlot(); } + builder()->CompareOperation(expr->op(), lhs, feedback_index(slot)); } // Always returns a boolean value. execution_result()->SetResultIsBoolean(); @@ -5132,8 +5141,19 @@ void BytecodeGenerator::VisitTemplateLiteral(TemplateLiteral* expr) { } } -void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) { - builder()->LoadAccumulatorWithRegister(Register::function_closure()); +void BytecodeGenerator::BuildThisVariableLoad() { + DeclarationScope* receiver_scope = closure_scope()->GetReceiverScope(); + Variable* var = receiver_scope->receiver(); + // TODO(littledan): implement 'this' hole check elimination. 
+ HoleCheckMode hole_check_mode = + IsDerivedConstructor(receiver_scope->function_kind()) + ? HoleCheckMode::kRequired + : HoleCheckMode::kElided; + BuildVariableLoad(var, hole_check_mode); +} + +void BytecodeGenerator::VisitThisExpression(ThisExpression* expr) { + BuildThisVariableLoad(); } void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) { @@ -5854,16 +5874,6 @@ FeedbackSlot BytecodeGenerator::GetDummyCompareICSlot() { return dummy_feedback_slot_.Get(); } -Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() { - return is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict - : Runtime::kStoreToSuper_Sloppy; -} - -Runtime::FunctionId BytecodeGenerator::StoreKeyedToSuperRuntimeId() { - return is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict - : Runtime::kStoreKeyedToSuper_Sloppy; -} - } // namespace interpreter } // namespace internal } // namespace v8 diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h index a5c573f7ff..045567b1ac 100644 --- a/deps/v8/src/interpreter/bytecode-generator.h +++ b/deps/v8/src/interpreter/bytecode-generator.h @@ -7,6 +7,7 @@ #include "src/ast/ast.h" #include "src/feedback-vector.h" +#include "src/function-kind.h" #include "src/interpreter/bytecode-array-builder.h" #include "src/interpreter/bytecode-label.h" #include "src/interpreter/bytecode-register.h" @@ -203,6 +204,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void BuildAssignment(const AssignmentLhsData& data, Token::Value op, LookupHoistingMode lookup_hoisting_mode); + void BuildThisVariableLoad(); + Expression* GetDestructuringDefaultValue(Expression** target); void BuildDestructuringArrayAssignment( ArrayLiteral* pattern, Token::Value op, diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc index da607a2927..df49b03bd4 100644 --- a/deps/v8/src/interpreter/bytecode-label.cc +++ 
b/deps/v8/src/interpreter/bytecode-label.cc @@ -18,18 +18,13 @@ BytecodeLabel* BytecodeLabels::New() { } void BytecodeLabels::Bind(BytecodeArrayBuilder* builder) { + DCHECK(!is_bound_); + is_bound_ = true; for (auto& label : labels_) { builder->Bind(&label); } } -void BytecodeLabels::BindToLabel(BytecodeArrayBuilder* builder, - const BytecodeLabel& target) { - for (auto& label : labels_) { - builder->Bind(target, &label); - } -} - } // namespace interpreter } // namespace internal } // namespace v8 diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h index 9622c1513e..4581f4f4e2 100644 --- a/deps/v8/src/interpreter/bytecode-label.h +++ b/deps/v8/src/interpreter/bytecode-label.h @@ -15,42 +15,67 @@ namespace interpreter { class BytecodeArrayBuilder; -// A label representing a branch target in a bytecode array. When a -// label is bound, it represents a known position in the bytecode -// array. For labels that are forward references there can be at most -// one reference whilst it is unbound. -class V8_EXPORT_PRIVATE BytecodeLabel final { +// A label representing a loop header in a bytecode array. It is bound before +// the jump is seen, so its position is always known by the time the jump is +// reached. 
+class V8_EXPORT_PRIVATE BytecodeLoopHeader final { public: - BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {} + BytecodeLoopHeader() : offset_(kInvalidOffset) {} - bool is_bound() const { return bound_; } - size_t offset() const { return offset_; } + size_t offset() const { + DCHECK_NE(offset_, kInvalidOffset); + return offset_; + } private: static const size_t kInvalidOffset = static_cast<size_t>(-1); void bind_to(size_t offset) { - DCHECK(!bound_ && offset != kInvalidOffset); + DCHECK_NE(offset, kInvalidOffset); + DCHECK_EQ(offset_, kInvalidOffset); offset_ = offset; - bound_ = true; } - void set_referrer(size_t offset) { - DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset); - offset_ = offset; + // The bytecode offset of the loop header. + size_t offset_; + + friend class BytecodeArrayWriter; +}; + +// A label representing a forward branch target in a bytecode array. When a +// label is bound, it represents a known position in the bytecode array. A label +// can only have at most one referrer jump. 
+class V8_EXPORT_PRIVATE BytecodeLabel final { + public: + BytecodeLabel() : bound_(false), jump_offset_(kInvalidOffset) {} + + bool is_bound() const { return bound_; } + size_t jump_offset() const { + DCHECK_NE(jump_offset_, kInvalidOffset); + return jump_offset_; + } + + bool has_referrer_jump() const { return jump_offset_ != kInvalidOffset; } + + private: + static const size_t kInvalidOffset = static_cast<size_t>(-1); + + void bind() { + DCHECK(!bound_); + bound_ = true; } - bool is_forward_target() const { - return offset() != kInvalidOffset && !is_bound(); + void set_referrer(size_t offset) { + DCHECK(!bound_); + DCHECK_NE(offset, kInvalidOffset); + DCHECK_EQ(jump_offset_, kInvalidOffset); + jump_offset_ = offset; } - // There are three states for a label: - // bound_ offset_ - // UNSET false kInvalidOffset - // FORWARD_TARGET false Offset of referring jump - // BACKWARD_TARGET true Offset of label in bytecode array when bound + // Set when the label is bound (i.e. the start of the target basic block). bool bound_; - size_t offset_; + // Set when the jump referrer is set (i.e. the location of the jump). + size_t jump_offset_; friend class BytecodeArrayWriter; }; @@ -58,26 +83,26 @@ class V8_EXPORT_PRIVATE BytecodeLabel final { // Class representing a branch target of multiple jumps. 
class V8_EXPORT_PRIVATE BytecodeLabels { public: - explicit BytecodeLabels(Zone* zone) : labels_(zone) {} + explicit BytecodeLabels(Zone* zone) : labels_(zone), is_bound_(false) {} BytecodeLabel* New(); void Bind(BytecodeArrayBuilder* builder); - void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target); - bool is_bound() const { - bool is_bound = !labels_.empty() && labels_.front().is_bound(); - DCHECK(!is_bound || - std::all_of(labels_.begin(), labels_.end(), - [](const BytecodeLabel& l) { return l.is_bound(); })); - return is_bound; + DCHECK_IMPLIES( + is_bound_, + std::all_of(labels_.begin(), labels_.end(), [](const BytecodeLabel& l) { + return !l.has_referrer_jump() || l.is_bound(); + })); + return is_bound_; } bool empty() const { return labels_.empty(); } private: ZoneLinkedList<BytecodeLabel> labels_; + bool is_bound_; DISALLOW_COPY_AND_ASSIGN(BytecodeLabels); }; diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h index 7ba7d3b602..b120741872 100644 --- a/deps/v8/src/interpreter/bytecode-register-optimizer.h +++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h @@ -60,6 +60,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final // Materialize all live registers and flush equivalence sets. void Flush(); + bool EnsureAllRegistersAreFlushed() const; // Prepares for |bytecode|. template <Bytecode bytecode, AccumulatorUse accumulator_use> @@ -132,8 +133,6 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final RegisterInfo* non_set_member); void PushToRegistersNeedingFlush(RegisterInfo* reg); - bool EnsureAllRegistersAreFlushed() const; - // Methods for finding and creating metadata for each register. 
RegisterInfo* GetRegisterInfo(Register reg) { size_t index = GetRegisterInfoTableIndex(reg); diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h index ae8bbe4275..ca76fcfec4 100644 --- a/deps/v8/src/interpreter/bytecode-register.h +++ b/deps/v8/src/interpreter/bytecode-register.h @@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Register final { } private: - DISALLOW_NEW_AND_DELETE(); + DISALLOW_NEW_AND_DELETE() static const int kInvalidIndex = kMaxInt; static const int kRegisterFileStartOffset = diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h index 7efcd1ae62..f9713ef79b 100644 --- a/deps/v8/src/interpreter/bytecodes.h +++ b/deps/v8/src/interpreter/bytecodes.h @@ -235,7 +235,7 @@ namespace interpreter { V(TestReferenceEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \ V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg, \ OperandType::kIdx) \ - V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \ + V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \ V(TestUndetectable, AccumulatorUse::kReadWrite) \ V(TestNull, AccumulatorUse::kReadWrite) \ V(TestUndefined, AccumulatorUse::kReadWrite) \ diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc index 6b1bdc3424..8eb44069f6 100644 --- a/deps/v8/src/interpreter/control-flow-builders.cc +++ b/deps/v8/src/interpreter/control-flow-builders.cc @@ -70,7 +70,6 @@ void LoopBuilder::JumpToHeader(int loop_depth) { int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1); // Loop must have closed form, i.e. all loop elements are within the loop, // the loop header precedes the body and next elements in the loop. 
- DCHECK(loop_header_.is_bound()); builder()->JumpLoop(&loop_header_, level); } @@ -79,7 +78,7 @@ void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); } SwitchBuilder::~SwitchBuilder() { #ifdef DEBUG for (auto site : case_sites_) { - DCHECK(site.is_bound()); + DCHECK(!site.has_referrer_jump() || site.is_bound()); } #endif } @@ -108,7 +107,6 @@ void TryCatchBuilder::BeginTry(Register context) { void TryCatchBuilder::EndTry() { builder()->MarkTryEnd(handler_id_); builder()->Jump(&exit_); - builder()->Bind(&handler_); builder()->MarkHandler(handler_id_, catch_prediction_); if (block_coverage_builder_ != nullptr) { diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h index 8359f0d1ee..d4f2d11e7c 100644 --- a/deps/v8/src/interpreter/control-flow-builders.h +++ b/deps/v8/src/interpreter/control-flow-builders.h @@ -121,7 +121,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder { void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); } private: - BytecodeLabel loop_header_; + BytecodeLoopHeader loop_header_; // Unbound labels that identify jumps for continue statements in the code and // jumps from checking the loop condition to the header for do-while loops. 
@@ -188,7 +188,6 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder { private: int handler_id_; HandlerTable::CatchPrediction catch_prediction_; - BytecodeLabel handler_; BytecodeLabel exit_; BlockCoverageBuilder* block_coverage_builder_; diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc index dadfaa8783..903300a6e1 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.cc +++ b/deps/v8/src/interpreter/interpreter-assembler.cc @@ -236,18 +236,19 @@ Node* InterpreterAssembler::RegisterFrameOffset(Node* index) { } Node* InterpreterAssembler::LoadRegister(Node* reg_index) { - return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index), LoadSensitivity::kCritical); + return LoadFullTagged(GetInterpretedFramePointer(), + RegisterFrameOffset(reg_index), + LoadSensitivity::kCritical); } Node* InterpreterAssembler::LoadRegister(Register reg) { - return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(), - IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2)); + return LoadFullTagged(GetInterpretedFramePointer(), + IntPtrConstant(reg.ToOperand() * kSystemPointerSize)); } Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) { return LoadAndUntagSmi(GetInterpretedFramePointer(), - reg.ToOperand() << kSystemPointerSizeLog2); + reg.ToOperand() * kSystemPointerSize); } Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) { @@ -282,7 +283,7 @@ Node* InterpreterAssembler::LoadRegisterFromRegisterList( const RegListNodePair& reg_list, int index) { Node* location = RegisterLocationInRegisterList(reg_list, index); // Location is already poisoned on speculation, so no need to poison here. 
- return Load(MachineType::AnyTagged(), location); + return LoadFullTagged(location); } Node* InterpreterAssembler::RegisterLocationInRegisterList( @@ -296,19 +297,18 @@ Node* InterpreterAssembler::RegisterLocationInRegisterList( } void InterpreterAssembler::StoreRegister(Node* value, Register reg) { - StoreNoWriteBarrier( - MachineRepresentation::kTagged, GetInterpretedFramePointer(), - IntPtrConstant(reg.ToOperand() << kSystemPointerSizeLog2), value); + StoreFullTaggedNoWriteBarrier( + GetInterpretedFramePointer(), + IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value); } void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, - GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index), value); + StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), + RegisterFrameOffset(reg_index), value); } void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) { - int offset = reg.ToOperand() << kSystemPointerSizeLog2; + int offset = reg.ToOperand() * kSystemPointerSize; StoreAndTagSmi(GetInterpretedFramePointer(), offset, value); } @@ -648,8 +648,8 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { TNode<FixedArray> constant_pool = CAST(LoadObjectField( BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset)); - return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index), - LoadSensitivity::kCritical); + return UnsafeLoadFixedArrayElement( + constant_pool, UncheckedCast<IntPtrT>(index), LoadSensitivity::kCritical); } Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) { diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc index 0ac2146731..e1e3181357 100644 --- a/deps/v8/src/interpreter/interpreter-generator.cc +++ b/deps/v8/src/interpreter/interpreter-generator.cc @@ -176,20 
+176,10 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler { return CAST(name); }; - Label miss(this, Label::kDeferred); ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS; - GotoIf(IsUndefined(maybe_feedback_vector), &miss); - accessor_asm.LoadGlobalIC(CAST(maybe_feedback_vector), feedback_slot, + accessor_asm.LoadGlobalIC(maybe_feedback_vector, feedback_slot, lazy_context, lazy_name, typeof_mode, &exit_point, slot_mode); - - BIND(&miss); - { - exit_point.ReturnCallRuntime( - Runtime::kLoadGlobalIC_Miss, lazy_context(), lazy_name(), - ParameterToTagged(feedback_slot, slot_mode), maybe_feedback_vector, - SmiConstant(typeof_mode)); - } } }; @@ -237,10 +227,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { Goto(&end); Bind(&no_feedback); - TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); - Node* language_mode = GetLanguageMode(closure, context); - CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name, - language_mode); + CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name); Goto(&end); Bind(&end); @@ -571,22 +558,9 @@ IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) { Node* feedback_vector = LoadFeedbackVectorUnchecked(); Node* context = GetContext(); - Label no_feedback(this, Label::kDeferred), end(this); VARIABLE(var_result, MachineRepresentation::kTagged); - GotoIf(IsUndefined(feedback_vector), &no_feedback); var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name, smi_slot, feedback_vector)); - Goto(&end); - - BIND(&no_feedback); - { - Comment("KeyedLoadIC_no_feedback"); - var_result.Bind(CallRuntime(Runtime::kKeyedLoadIC_Miss, context, object, - name, smi_slot, feedback_vector)); - Goto(&end); - } - - BIND(&end); SetAccumulator(var_result.value()); Dispatch(); } @@ -609,22 +583,8 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler { Node* context = GetContext(); VARIABLE(var_result, 
MachineRepresentation::kTagged); - Label no_feedback(this, Label::kDeferred), end(this); - GotoIf(IsUndefined(maybe_vector), &no_feedback); var_result.Bind(CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot, maybe_vector)); - Goto(&end); - - Bind(&no_feedback); - TNode<JSFunction> closure = - CAST(LoadRegister(Register::function_closure())); - Node* language_mode = GetLanguageMode(closure, context); - var_result.Bind(CallRuntime(Runtime::kStoreICNoFeedback_Miss, context, - value, object, name, language_mode, - SmiConstant(property_type))); - Goto(&end); - - Bind(&end); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. It // doesn't really matter what we write to the accumulator here, since we @@ -664,11 +624,10 @@ IGNITION_HANDLER(StaNamedPropertyNoFeedback, Node* object = LoadRegisterAtOperandIndex(0); Node* name = LoadConstantPoolEntryAtOperandIndex(1); Node* value = GetAccumulator(); - Node* language_mode = SmiFromInt32(BytecodeOperandFlag(2)); Node* context = GetContext(); - Node* result = CallRuntime(Runtime::kSetNamedProperty, context, object, name, - value, language_mode); + Node* result = + CallRuntime(Runtime::kSetNamedProperty, context, object, name, value); SetAccumulator(result); Dispatch(); } @@ -686,22 +645,9 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) { Node* maybe_vector = LoadFeedbackVectorUnchecked(); Node* context = GetContext(); - Label no_feedback(this, Label::kDeferred), end(this); VARIABLE(var_result, MachineRepresentation::kTagged); - GotoIf(IsUndefined(maybe_vector), &no_feedback); - var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name, value, smi_slot, maybe_vector)); - Goto(&end); - - Bind(&no_feedback); - TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); - Node* language_mode = GetLanguageMode(closure, context); - 
var_result.Bind(CallRuntime(Runtime::kKeyedStoreICNoFeedback_Miss, context, - value, object, name, language_mode)); - Goto(&end); - - Bind(&end); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. It // doesn't really matter what we write to the accumulator here, since we @@ -725,19 +671,8 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) { Node* context = GetContext(); VARIABLE(var_result, MachineRepresentation::kTagged); - Label no_feedback(this, Label::kDeferred), end(this); - GotoIf(IsUndefined(feedback_vector), &no_feedback); - var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array, index, value, smi_slot, feedback_vector)); - Goto(&end); - - BIND(&no_feedback); - var_result.Bind(CallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, context, - value, smi_slot, feedback_vector, array, index)); - Goto(&end); - - BIND(&end); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. It // doesn't really matter what we write to the accumulator here, since we @@ -1941,16 +1876,22 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) { Dispatch(); } -// TestIn <src> +// TestIn <src> <feedback_slot> // // Test if the object referenced by the register operand is a property of the // object referenced by the accumulator. 
IGNITION_HANDLER(TestIn, InterpreterAssembler) { - Node* property = LoadRegisterAtOperandIndex(0); + Node* name = LoadRegisterAtOperandIndex(0); Node* object = GetAccumulator(); + Node* raw_slot = BytecodeOperandIdx(1); + Node* smi_slot = SmiTag(raw_slot); + Node* feedback_vector = LoadFeedbackVectorUnchecked(); Node* context = GetContext(); - SetAccumulator(HasProperty(context, object, property, kHasProperty)); + VARIABLE(var_result, MachineRepresentation::kTagged); + var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name, + smi_slot, feedback_vector)); + SetAccumulator(var_result.value()); Dispatch(); } @@ -2463,22 +2404,10 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) { Node* context = GetContext(); VARIABLE(result, MachineRepresentation::kTagged); - Label no_feedback(this, Label::kDeferred), end(this); - GotoIf(IsUndefined(feedback_vector), &no_feedback); ConstructorBuiltinsAssembler constructor_assembler(state()); result.Bind(constructor_assembler.EmitCreateRegExpLiteral( feedback_vector, slot_id, pattern, flags, context)); - Goto(&end); - - BIND(&no_feedback); - { - result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, - feedback_vector, SmiTag(slot_id), pattern, flags)); - Goto(&end); - } - - BIND(&end); SetAccumulator(result.value()); Dispatch(); } @@ -2644,18 +2573,8 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) { Node* context = GetContext(); Variable var_result(this, MachineRepresentation::kTagged); - Label no_feedback(this), end(this); - GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback); var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags, smi_slot, maybe_feedback_vector)); - Goto(&end); - - BIND(&no_feedback); - var_result.Bind(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source, - smi_flags, smi_slot, maybe_feedback_vector)); - Goto(&end); - - BIND(&end); SetAccumulator(var_result.value()); Dispatch(); } @@ -2683,9 +2602,13 @@ 
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { BIND(&call_runtime); { Node* description = LoadConstantPoolEntryAtOperandIndex(0); + Node* slot_smi = SmiTag(slot); + Node* closure = LoadRegister(Register::function_closure()); + Node* shared_info = + LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); Node* context = GetContext(); - Node* result = - CallRuntime(Runtime::kCreateTemplateObject, context, description); + Node* result = CallRuntime(Runtime::kGetTemplateObject, context, + description, shared_info, slot_smi); Label end(this); GotoIf(IsUndefined(feedback_vector), &end); @@ -2906,8 +2829,7 @@ IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) { ExternalReference::address_of_pending_message_obj(isolate())); Node* previous_message = Load(MachineType::TaggedPointer(), pending_message); Node* new_message = GetAccumulator(); - StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message, - new_message); + StoreFullTaggedNoWriteBarrier(pending_message, new_message); SetAccumulator(previous_message); Dispatch(); } @@ -2967,7 +2889,8 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) { BIND(&throw_error); { Node* name = LoadConstantPoolEntryAtOperandIndex(0); - CallRuntime(Runtime::kThrowReferenceError, GetContext(), name); + CallRuntime(Runtime::kThrowAccessedUninitializedVariable, GetContext(), + name); // We shouldn't ever return from a throw. Abort(AbortReason::kUnexpectedReturnFromThrow); Unreachable(); @@ -3037,7 +2960,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) { SetAccumulator(return_value); \ DispatchToBytecode(original_bytecode, BytecodeOffset()); \ } -DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); +DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK) #undef DEBUG_BREAK // IncBlockCounter <slot> |