// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;
using compiler::Node;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(
          bytecode_array_,
          CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedCast<IntPtrT>(
              Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
      TVARIABLE_CONSTRUCTOR(
          dispatch_table_,
          UncheckedCast<ExternalReference>(
              Parameter(InterpreterDispatchDescriptor::kDispatchTable))),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if bytecode will make a call along
  // the critical path, or it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
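    // (This mirrors ReloadBytecodeOffset(), which adds one back when the
    // offset is reloaded under a Wide / ExtraWide operand scale, so the
    // saved register slot always refers to the prefix bytecode.)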
bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1)); } int store_offset = Register::bytecode_offset().ToOperand() * kSystemPointerSize; TNode base = GetInterpretedFramePointer(); if (SmiValuesAre32Bits()) { int zero_offset = store_offset + 4; int payload_offset = store_offset; #if V8_TARGET_LITTLE_ENDIAN std::swap(zero_offset, payload_offset); #endif StoreNoWriteBarrier(MachineRepresentation::kWord32, base, IntPtrConstant(zero_offset), Int32Constant(0)); StoreNoWriteBarrier(MachineRepresentation::kWord32, base, IntPtrConstant(payload_offset), TruncateIntPtrToInt32(bytecode_offset)); } else { StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base, IntPtrConstant(store_offset), SmiTag(bytecode_offset)); } } TNode InterpreterAssembler::BytecodeArrayTaggedPointer() { // Force a re-load of the bytecode array after every call in case the debugger // has been activated. if (!bytecode_array_valid_) { bytecode_array_ = CAST(LoadRegister(Register::bytecode_array())); bytecode_array_valid_ = true; } return bytecode_array_.value(); } TNode InterpreterAssembler::DispatchTablePointer() { if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ && (dispatch_table_.value() == Parameter(InterpreterDispatchDescriptor::kDispatchTable))) { dispatch_table_ = ExternalConstant( ExternalReference::interpreter_dispatch_table_address(isolate())); } return dispatch_table_.value(); } TNode InterpreterAssembler::GetAccumulatorUnchecked() { return accumulator_.value(); } TNode InterpreterAssembler::GetAccumulator() { DCHECK(Bytecodes::ReadsAccumulator(bytecode_)); accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead; return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked()); } // TODO(v8:6949): Remove sloppy-ness from SetAccumulator's value argument. void InterpreterAssembler::SetAccumulator(SloppyTNode value) { DCHECK(Bytecodes::WritesAccumulator(bytecode_)); accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; accumulator_ = value; } TNode InterpreterAssembler::GetContext() { return CAST(LoadRegister(Register::current_context())); } void InterpreterAssembler::SetContext(TNode value) { StoreRegister(value, Register::current_context()); } TNode InterpreterAssembler::GetContextAtDepth(TNode context, TNode depth) { TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); Label context_found(this); Label context_search(this, {&cur_depth, &cur_context}); // Fast path if the depth is 0. Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search); // Loop until the depth is 0. BIND(&context_search); { cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); cur_context = CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found, &context_search); } BIND(&context_found); return cur_context.value(); } void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth( TNode context, TNode depth, Label* target) { TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); Label context_search(this, {&cur_depth, &cur_context}); Label no_extension(this); // Loop until the depth is 0. Goto(&context_search); BIND(&context_search); { // Check if context has an extension slot TNode has_extension = LoadContextHasExtensionField(cur_context.value()); GotoIfNot(has_extension, &no_extension); // Jump to the target if the extension slot is not a hole. 
TNode extension_slot = LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX); Branch(TaggedNotEqual(extension_slot, TheHoleConstant()), target, &no_extension); BIND(&no_extension); { cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); cur_context = CAST( LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), &context_search); } } } TNode InterpreterAssembler::RegisterLocation( TNode reg_index) { return Signed(WordPoisonOnSpeculation( IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)))); } TNode InterpreterAssembler::RegisterLocation(Register reg) { return RegisterLocation(IntPtrConstant(reg.ToOperand())); } TNode InterpreterAssembler::RegisterFrameOffset(TNode index) { return TimesSystemPointerSize(index); } TNode InterpreterAssembler::LoadRegister(TNode reg_index) { return LoadFullTagged(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), LoadSensitivity::kCritical); } TNode InterpreterAssembler::LoadRegister(Register reg) { return LoadFullTagged(GetInterpretedFramePointer(), IntPtrConstant(reg.ToOperand() * kSystemPointerSize)); } TNode InterpreterAssembler::LoadAndUntagRegister(Register reg) { TNode base = GetInterpretedFramePointer(); int index = reg.ToOperand() * kSystemPointerSize; if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN index += 4; #endif return ChangeInt32ToIntPtr( Load(MachineType::Int32(), base, IntPtrConstant(index))); } else { return SmiToIntPtr( Load(MachineType::TaggedSigned(), base, IntPtrConstant(index))); } } TNode InterpreterAssembler::LoadRegisterAtOperandIndex( int operand_index) { return LoadRegister( BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); } std::pair, TNode> InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) { DCHECK_EQ(OperandType::kRegPair, Bytecodes::GetOperandType(bytecode_, operand_index)); TNode first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); TNode second_reg_index = NextRegister(first_reg_index); return std::make_pair(LoadRegister(first_reg_index), LoadRegister(second_reg_index)); } InterpreterAssembler::RegListNodePair InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { DCHECK(Bytecodes::IsRegisterListOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); DCHECK_EQ(OperandType::kRegCount, Bytecodes::GetOperandType(bytecode_, operand_index + 1)); TNode base_reg = RegisterLocation( BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); TNode reg_count = BytecodeOperandCount(operand_index + 1); return RegListNodePair(base_reg, reg_count); } TNode InterpreterAssembler::LoadRegisterFromRegisterList( const RegListNodePair& reg_list, int index) { TNode location = RegisterLocationInRegisterList(reg_list, index); // Location is already poisoned on speculation, so no need to poison here. return LoadFullTagged(location); } TNode InterpreterAssembler::RegisterLocationInRegisterList( const RegListNodePair& reg_list, int index) { CSA_ASSERT(this, Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index))); TNode offset = RegisterFrameOffset(IntPtrConstant(index)); // Register indexes are negative, so subtract index from base location to get // location. 
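  // That is, entry i of the list lives at
  // base_reg_location - i * kSystemPointerSize.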
return Signed(IntPtrSub(reg_list.base_reg_location(), offset)); } void InterpreterAssembler::StoreRegister(TNode value, Register reg) { StoreFullTaggedNoWriteBarrier( GetInterpretedFramePointer(), IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value); } void InterpreterAssembler::StoreRegister(TNode value, TNode reg_index) { StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), value); } void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode value, int operand_index) { StoreRegister(value, BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); } void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode value1, TNode value2, int operand_index) { DCHECK_EQ(OperandType::kRegOutPair, Bytecodes::GetOperandType(bytecode_, operand_index)); TNode first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode second_reg_index = NextRegister(first_reg_index); StoreRegister(value2, second_reg_index); } void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( TNode value1, TNode value2, TNode value3, int operand_index) { DCHECK_EQ(OperandType::kRegOutTriple, Bytecodes::GetOperandType(bytecode_, operand_index)); TNode first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode second_reg_index = NextRegister(first_reg_index); StoreRegister(value2, second_reg_index); TNode third_reg_index = NextRegister(second_reg_index); StoreRegister(value3, third_reg_index); } TNode InterpreterAssembler::NextRegister(TNode reg_index) { // Register indexes are negative, so the next index is minus one. return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1))); } TNode InterpreterAssembler::OperandOffset(int operand_index) { return IntPtrConstant( Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale())); } TNode InterpreterAssembler::BytecodeOperandUnsignedByte( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); TNode operand_offset = OperandOffset(operand_index); return Load(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); } TNode InterpreterAssembler::BytecodeOperandSignedByte( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); TNode operand_offset = OperandOffset(operand_index); return Load(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); } TNode InterpreterAssembler::BytecodeOperandReadUnaligned( int relative_offset, MachineType result_type, LoadSensitivity needs_poisoning) { static const int kMaxCount = 4; DCHECK(!TargetSupportsUnalignedAccess()); int count; switch (result_type.representation()) { case MachineRepresentation::kWord16: count = 2; break; case MachineRepresentation::kWord32: count = 4; break; default: UNREACHABLE(); } MachineType msb_type = result_type.IsSigned() ? 
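      // The most significant byte is loaded with the signed type so that,
      // after the shift-and-or packing below, signed operands come out
      // correctly sign-extended; all remaining bytes are loaded as Uint8.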
MachineType::Int8() : MachineType::Uint8(); #if V8_TARGET_LITTLE_ENDIAN const int kStep = -1; int msb_offset = count - 1; #elif V8_TARGET_BIG_ENDIAN const int kStep = 1; int msb_offset = 0; #else #error "Unknown Architecture" #endif // Read the most signicant bytecode into bytes[0] and then in order // down to least significant in bytes[count - 1]. DCHECK_LE(count, kMaxCount); TNode bytes[kMaxCount]; for (int i = 0; i < count; i++) { MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8(); TNode offset = IntPtrConstant(relative_offset + msb_offset + i * kStep); TNode array_offset = IntPtrAdd(BytecodeOffset(), offset); bytes[i] = UncheckedCast(Load(machine_type, BytecodeArrayTaggedPointer(), array_offset, needs_poisoning)); } // Pack LSB to MSB. TNode result = bytes[--count]; for (int i = 1; --count >= 0; i++) { TNode shift = Int32Constant(i * kBitsPerByte); TNode value = Word32Shl(bytes[count], shift); result = Word32Or(value, result); } return result; } TNode InterpreterAssembler::BytecodeOperandUnsignedShort( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale())); int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), needs_poisoning); } else { return UncheckedCast(BytecodeOperandReadUnaligned( operand_offset, MachineType::Uint16(), needs_poisoning)); } } TNode InterpreterAssembler::BytecodeOperandSignedShort( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale())); int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), needs_poisoning); } else { return UncheckedCast(BytecodeOperandReadUnaligned( operand_offset, MachineType::Int16(), needs_poisoning)); } } TNode InterpreterAssembler::BytecodeOperandUnsignedQuad( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), needs_poisoning); } else { return UncheckedCast(BytecodeOperandReadUnaligned( operand_offset, MachineType::Uint32(), needs_poisoning)); } } TNode InterpreterAssembler::BytecodeOperandSignedQuad( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), needs_poisoning); } else { return UncheckedCast(BytecodeOperandReadUnaligned( 
operand_offset, MachineType::Int32(), needs_poisoning)); } } TNode InterpreterAssembler::BytecodeSignedOperand( int operand_index, OperandSize operand_size, LoadSensitivity needs_poisoning) { DCHECK(!Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); switch (operand_size) { case OperandSize::kByte: return BytecodeOperandSignedByte(operand_index, needs_poisoning); case OperandSize::kShort: return BytecodeOperandSignedShort(operand_index, needs_poisoning); case OperandSize::kQuad: return BytecodeOperandSignedQuad(operand_index, needs_poisoning); case OperandSize::kNone: UNREACHABLE(); } } TNode InterpreterAssembler::BytecodeUnsignedOperand( int operand_index, OperandSize operand_size, LoadSensitivity needs_poisoning) { DCHECK(Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); switch (operand_size) { case OperandSize::kByte: return BytecodeOperandUnsignedByte(operand_index, needs_poisoning); case OperandSize::kShort: return BytecodeOperandUnsignedShort(operand_index, needs_poisoning); case OperandSize::kQuad: return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning); case OperandSize::kNone: UNREACHABLE(); } } TNode InterpreterAssembler::BytecodeOperandCount(int operand_index) { DCHECK_EQ(OperandType::kRegCount, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandFlag(int operand_index) { DCHECK_EQ(OperandType::kFlag8, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); DCHECK_EQ(operand_size, OperandSize::kByte); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandUImm(int operand_index) { DCHECK_EQ(OperandType::kUImm, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandUImmWord( int operand_index) { return ChangeUint32ToWord(BytecodeOperandUImm(operand_index)); } TNode InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { return SmiFromUint32(BytecodeOperandUImm(operand_index)); } TNode InterpreterAssembler::BytecodeOperandImm(int operand_index) { DCHECK_EQ(OperandType::kImm, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return BytecodeSignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandImmIntPtr( int operand_index) { return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index)); } TNode InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { return SmiFromInt32(BytecodeOperandImm(operand_index)); } TNode InterpreterAssembler::BytecodeOperandIdxInt32( int operand_index) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandIdx(int operand_index) { return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index)); } TNode 
InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { return SmiTag(Signed(BytecodeOperandIdx(operand_index))); } TNode InterpreterAssembler::BytecodeOperandConstantPoolIdx( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeUint32ToWord( BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning)); } TNode InterpreterAssembler::BytecodeOperandReg( int operand_index, LoadSensitivity needs_poisoning) { DCHECK(Bytecodes::IsRegisterOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeInt32ToIntPtr( BytecodeSignedOperand(operand_index, operand_size, needs_poisoning)); } TNode InterpreterAssembler::BytecodeOperandRuntimeId( int operand_index) { DCHECK_EQ(OperandType::kRuntimeId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); DCHECK_EQ(operand_size, OperandSize::kShort); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::BytecodeOperandNativeContextIndex( int operand_index) { DCHECK_EQ(OperandType::kNativeContextIndex, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeUint32ToWord( BytecodeUnsignedOperand(operand_index, operand_size)); } TNode InterpreterAssembler::BytecodeOperandIntrinsicId( int operand_index) { DCHECK_EQ(OperandType::kIntrinsicId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); DCHECK_EQ(operand_size, OperandSize::kByte); return BytecodeUnsignedOperand(operand_index, operand_size); } TNode InterpreterAssembler::LoadConstantPoolEntry(TNode index) { TNode constant_pool = CAST(LoadObjectField( BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset)); return UnsafeLoadFixedArrayElement( constant_pool, UncheckedCast(index), LoadSensitivity::kCritical); } TNode InterpreterAssembler::LoadAndUntagConstantPoolEntry( TNode index) { return SmiUntag(CAST(LoadConstantPoolEntry(index))); } TNode InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( int operand_index) { TNode index = BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe); return LoadConstantPoolEntry(index); } TNode InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex( int operand_index) { return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index))); } TNode InterpreterAssembler::LoadFeedbackVector() { TNode function = CAST(LoadRegister(Register::function_closure())); return CodeStubAssembler::LoadFeedbackVector(function); } void InterpreterAssembler::CallPrologue() { if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) { // Bytecodes that make a call along the critical path save the bytecode // offset in the bytecode handler's prologue. For other bytecodes, if // there are multiple calls in the bytecode handler, you need to spill // before each of them, unless SaveBytecodeOffset has explicitly been called // in a path that dominates _all_ of those calls (which we don't track). 
SaveBytecodeOffset(); } bytecode_array_valid_ = false; made_call_ = true; } void InterpreterAssembler::CallEpilogue() { } void InterpreterAssembler::CallJSAndDispatch( TNode function, TNode context, const RegListNodePair& args, ConvertReceiverMode receiver_mode) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) || bytecode_ == Bytecode::kInvokeIntrinsic); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode); TNode args_count; if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The receiver is implied, so it is not in the argument list. args_count = args.reg_count(); } else { // Subtract the receiver from the argument count. TNode receiver_count = Int32Constant(1); args_count = Int32Sub(args.reg_count(), receiver_count); } Callable callable = CodeFactory::InterpreterPushArgsThenCall( isolate(), receiver_mode, InterpreterPushArgsMode::kOther); TNode code_target = HeapConstant(callable.code()); TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, args_count, args.base_reg_location(), function); // TailCallStubThenDispatch updates accumulator with result. accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } template void InterpreterAssembler::CallJSAndDispatch(TNode function, TNode context, TNode arg_count, ConvertReceiverMode receiver_mode, TArgs... args) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) || bytecode_ == Bytecode::kInvokeIntrinsic); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode); Callable callable = CodeFactory::Call(isolate()); TNode code_target = HeapConstant(callable.code()); if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The first argument parameter (the receiver) is implied to be undefined. TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, function, arg_count, UndefinedConstant(), args...); } else { TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, function, arg_count, args...); } // TailCallStubThenDispatch updates accumulator with result. accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } // Instantiate CallJSAndDispatch() for argument counts used by interpreter // generator. 
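// The variadic CallJSAndDispatch template above is defined in this file
// rather than in the header, so each arity used by the bytecode handler
// generator (zero to three additional arguments) needs an explicit
// instantiation below.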
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( TNode function, TNode context, TNode arg_count, ConvertReceiverMode receiver_mode); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( TNode function, TNode context, TNode arg_count, ConvertReceiverMode receiver_mode, TNode); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( TNode function, TNode context, TNode arg_count, ConvertReceiverMode receiver_mode, TNode, TNode); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( TNode function, TNode context, TNode arg_count, ConvertReceiverMode receiver_mode, TNode, TNode, TNode); void InterpreterAssembler::CallJSWithSpreadAndDispatch( TNode function, TNode context, const RegListNodePair& args, TNode slot_id, TNode maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny); CollectCallFeedback(function, context, maybe_feedback_vector, slot_id); Comment("call using CallWithSpread builtin"); Callable callable = CodeFactory::InterpreterPushArgsThenCall( isolate(), ConvertReceiverMode::kAny, InterpreterPushArgsMode::kWithFinalSpread); TNode code_target = HeapConstant(callable.code()); TNode receiver_count = Int32Constant(1); TNode args_count = Int32Sub(args.reg_count(), receiver_count); TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, args_count, args.base_reg_location(), function); // TailCallStubThenDispatch updates accumulator with result. accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } TNode InterpreterAssembler::Construct( TNode target, TNode context, TNode new_target, const RegListNodePair& args, TNode slot_id, TNode maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); TVARIABLE(Object, var_result); TVARIABLE(AllocationSite, var_site); Label extra_checks(this, Label::kDeferred), return_result(this, &var_result), construct(this), construct_array(this, &var_site); GotoIf(IsUndefined(maybe_feedback_vector), &construct); TNode feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); // Check if we have monomorphic {new_target} feedback already. TNode feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); Branch(IsWeakReferenceToObject(feedback, new_target), &construct, &extra_checks); BIND(&extra_checks); { Label check_allocation_site(this), check_initialized(this), initialize(this), mark_megamorphic(this); // Check if it is a megamorphic {new_target}.. Comment("check if megamorphic"); TNode is_megamorphic = TaggedEqual( feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); GotoIf(is_megamorphic, &construct); Comment("check if weak reference"); GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site); // If the weak reference is cleared, we have a new chance to become // monomorphic. Comment("check if weak reference is cleared"); Branch(IsCleared(feedback), &initialize, &mark_megamorphic); BIND(&check_allocation_site); { // Check if it is an AllocationSite. Comment("check if allocation site"); TNode strong_feedback = CAST(feedback); GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized); // Make sure that {target} and {new_target} are the Array constructor. 
TNode array_function = LoadContextElement( LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX); GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic); GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic); var_site = CAST(strong_feedback); Goto(&construct_array); } BIND(&check_initialized); { // Check if it is uninitialized. Comment("check if uninitialized"); TNode is_uninitialized = TaggedEqual(feedback, UninitializedSymbolConstant()); Branch(is_uninitialized, &initialize, &mark_megamorphic); } BIND(&initialize); { Comment("check if function in same native context"); GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); TNode current = var_current.value(); TNode current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), &if_function, &mark_megamorphic); BIND(&if_function); { // Check that the JSFunction {current} is in the current native // context. TNode current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); TNode current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), &done_loop, &mark_megamorphic); } BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. var_current = LoadObjectField( current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } BIND(&done_loop); // Create an AllocationSite if {target} and {new_target} refer // to the current native context's Array constructor. Label create_allocation_site(this), store_weak_reference(this); GotoIfNot(TaggedEqual(target, new_target), &store_weak_reference); TNode array_function = LoadContextElement( LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX); Branch(TaggedEqual(target, array_function), &create_allocation_site, &store_weak_reference); BIND(&create_allocation_site); { var_site = CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id); ReportFeedbackUpdate(feedback_vector, slot_id, "Construct:CreateAllocationSite"); Goto(&construct_array); } BIND(&store_weak_reference); { StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, CAST(new_target)); ReportFeedbackUpdate(feedback_vector, slot_id, "Construct:StoreWeakReference"); Goto(&construct); } } BIND(&mark_megamorphic); { // MegamorphicSentinel is an immortal immovable object so // write-barrier is not needed. Comment("transition to megamorphic"); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); StoreFeedbackVectorSlot( feedback_vector, slot_id, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), SKIP_WRITE_BARRIER); ReportFeedbackUpdate(feedback_vector, slot_id, "Construct:TransitionMegamorphic"); Goto(&construct); } } BIND(&construct_array); { // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array // constructor feedback collection inside of Ignition. 
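    // This path forwards the AllocationSite collected in var_site to the
    // Array constructor call; the generic construct path below passes
    // UndefinedConstant() instead.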
Comment("call using ConstructArray builtin"); Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kArrayFunction); TNode code_target = HeapConstant(callable.code()); var_result = CallStub(callable.descriptor(), code_target, context, args.reg_count(), args.base_reg_location(), target, new_target, var_site.value()); Goto(&return_result); } BIND(&construct); { // TODO(bmeurer): Remove the generic type_info parameter from the Construct. Comment("call using Construct builtin"); Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kOther); TNode code_target = HeapConstant(callable.code()); var_result = CallStub(callable.descriptor(), code_target, context, args.reg_count(), args.base_reg_location(), target, new_target, UndefinedConstant()); Goto(&return_result); } BIND(&return_result); return var_result.value(); } TNode InterpreterAssembler::ConstructWithSpread( TNode target, TNode context, TNode new_target, const RegListNodePair& args, TNode slot_id, TNode maybe_feedback_vector) { // TODO(bmeurer): Unify this with the Construct bytecode feedback // above once we have a way to pass the AllocationSite to the Array // constructor _and_ spread the last argument at the same time. DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); Label extra_checks(this, Label::kDeferred), construct(this); GotoIf(IsUndefined(maybe_feedback_vector), &construct); TNode feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); // Check if we have monomorphic {new_target} feedback already. TNode feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); Branch(IsWeakReferenceToObject(feedback, new_target), &construct, &extra_checks); BIND(&extra_checks); { Label check_initialized(this), initialize(this), mark_megamorphic(this); // Check if it is a megamorphic {new_target}. Comment("check if megamorphic"); TNode is_megamorphic = TaggedEqual( feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); GotoIf(is_megamorphic, &construct); Comment("check if weak reference"); GotoIfNot(IsWeakOrCleared(feedback), &check_initialized); // If the weak reference is cleared, we have a new chance to become // monomorphic. Comment("check if weak reference is cleared"); Branch(IsCleared(feedback), &initialize, &mark_megamorphic); BIND(&check_initialized); { // Check if it is uninitialized. Comment("check if uninitialized"); TNode is_uninitialized = TaggedEqual(feedback, UninitializedSymbolConstant()); Branch(is_uninitialized, &initialize, &mark_megamorphic); } BIND(&initialize); { Comment("check if function in same native context"); GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); TNode current = var_current.value(); TNode current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), &if_function, &mark_megamorphic); BIND(&if_function); { // Check that the JSFunction {current} is in the current native // context. 
TNode current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); TNode current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), &done_loop, &mark_megamorphic); } BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. var_current = LoadObjectField( current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } BIND(&done_loop); StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, CAST(new_target)); ReportFeedbackUpdate(feedback_vector, slot_id, "ConstructWithSpread:Initialize"); Goto(&construct); } BIND(&mark_megamorphic); { // MegamorphicSentinel is an immortal immovable object so // write-barrier is not needed. Comment("transition to megamorphic"); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); StoreFeedbackVectorSlot( feedback_vector, slot_id, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), SKIP_WRITE_BARRIER); ReportFeedbackUpdate(feedback_vector, slot_id, "ConstructWithSpread:TransitionMegamorphic"); Goto(&construct); } } BIND(&construct); Comment("call using ConstructWithSpread builtin"); Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kWithFinalSpread); TNode code_target = HeapConstant(callable.code()); return CallStub(callable.descriptor(), code_target, context, args.reg_count(), args.base_reg_location(), target, new_target, UndefinedConstant()); } Node* InterpreterAssembler::CallRuntimeN(TNode function_id, TNode context, const RegListNodePair& args, int result_size) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallRuntime(bytecode_)); Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size); TNode code_target = HeapConstant(callable.code()); // Get the function entry from the function id. TNode function_table = ReinterpretCast(ExternalConstant( ExternalReference::runtime_function_table_address(isolate()))); TNode function_offset = Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function))); TNode function = IntPtrAdd(function_table, ChangeUint32ToWord(function_offset)); TNode function_entry = Load( function, IntPtrConstant(offsetof(Runtime::Function, entry))); return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(), result_size, code_target, context, args.reg_count(), args.base_reg_location(), function_entry); } void InterpreterAssembler::UpdateInterruptBudget(TNode weight, bool backward) { Comment("[ UpdateInterruptBudget"); // Assert that the weight is positive (negative weights should be implemented // as backward updates). CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0))); Label load_budget_from_bytecode(this), load_budget_done(this); TNode function = CAST(LoadRegister(Register::function_closure())); TNode feedback_cell = LoadObjectField(function, JSFunction::kFeedbackCellOffset); TNode old_budget = LoadObjectField( feedback_cell, FeedbackCell::kInterruptBudgetOffset); // Make sure we include the current bytecode in the budget calculation. TNode budget_after_bytecode = Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize())); Label done(this); TVARIABLE(Int32T, new_budget); if (backward) { // Update budget by |weight| and check if it reaches zero. 
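    // If the decremented budget drops below zero, the deferred path calls
    // Runtime::kBytecodeBudgetInterrupt for this function before continuing.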
new_budget = Int32Sub(budget_after_bytecode, weight); TNode condition = Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0)); Label ok(this), interrupt_check(this, Label::kDeferred); Branch(condition, &ok, &interrupt_check); BIND(&interrupt_check); CallRuntime(Runtime::kBytecodeBudgetInterrupt, GetContext(), function); Goto(&done); BIND(&ok); } else { // For a forward jump, we know we only increase the interrupt budget, so // no need to check if it's below zero. new_budget = Int32Add(budget_after_bytecode, weight); } // Update budget. StoreObjectFieldNoWriteBarrier( feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value(), MachineRepresentation::kWord32); Goto(&done); BIND(&done); Comment("] UpdateInterruptBudget"); } TNode InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); } TNode InterpreterAssembler::Advance(int delta) { return Advance(IntPtrConstant(delta)); } TNode InterpreterAssembler::Advance(TNode delta, bool backward) { #ifdef V8_TRACE_IGNITION TraceBytecode(Runtime::kInterpreterTraceBytecodeExit); #endif TNode next_offset = backward ? IntPtrSub(BytecodeOffset(), delta) : IntPtrAdd(BytecodeOffset(), delta); bytecode_offset_ = next_offset; return next_offset; } void InterpreterAssembler::Jump(TNode jump_offset, bool backward) { DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_)); UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward); TNode new_bytecode_offset = Advance(jump_offset, backward); TNode target_bytecode = UncheckedCast(LoadBytecode(new_bytecode_offset)); DispatchToBytecode(target_bytecode, new_bytecode_offset); } void InterpreterAssembler::Jump(TNode jump_offset) { Jump(jump_offset, false); } void InterpreterAssembler::JumpBackward(TNode jump_offset) { Jump(jump_offset, true); } void InterpreterAssembler::JumpConditional(TNode condition, TNode jump_offset) { Label match(this), no_match(this); Branch(condition, &match, &no_match); BIND(&match); Jump(jump_offset); BIND(&no_match); Dispatch(); } void InterpreterAssembler::JumpIfTaggedEqual(TNode lhs, TNode rhs, TNode jump_offset) { JumpConditional(TaggedEqual(lhs, rhs), jump_offset); } void InterpreterAssembler::JumpIfTaggedNotEqual(TNode lhs, TNode rhs, TNode jump_offset) { JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset); } TNode InterpreterAssembler::LoadBytecode( TNode bytecode_offset) { TNode bytecode = Load(BytecodeArrayTaggedPointer(), bytecode_offset); return ChangeUint32ToWord(bytecode); } TNode InterpreterAssembler::StarDispatchLookahead( TNode target_bytecode) { Label do_inline_star(this), done(this); TVARIABLE(WordT, var_bytecode, target_bytecode); TNode star_bytecode = Int32Constant(static_cast(Bytecode::kStar)); TNode is_star = Word32Equal(TruncateWordToInt32(target_bytecode), star_bytecode); Branch(is_star, &do_inline_star, &done); BIND(&do_inline_star); { InlineStar(); var_bytecode = LoadBytecode(BytecodeOffset()); Goto(&done); } BIND(&done); return var_bytecode.value(); } void InterpreterAssembler::InlineStar() { Bytecode previous_bytecode = bytecode_; AccumulatorUse previous_acc_use = accumulator_use_; bytecode_ = Bytecode::kStar; accumulator_use_ = AccumulatorUse::kNone; #ifdef V8_TRACE_IGNITION TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry); #endif StoreRegister(GetAccumulator(), BytecodeOperandReg(0, LoadSensitivity::kSafe)); DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_)); Advance(); bytecode_ = previous_bytecode; accumulator_use_ = previous_acc_use; } void InterpreterAssembler::Dispatch() { 
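  // Advance to the next bytecode, load it (peeking past a following Star
  // when lookahead applies), and tail-call its handler from the dispatch
  // table.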
Comment("========= Dispatch"); DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); TNode target_offset = Advance(); TNode target_bytecode = LoadBytecode(target_offset); if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) { target_bytecode = StarDispatchLookahead(target_bytecode); } DispatchToBytecode(target_bytecode, BytecodeOffset()); } void InterpreterAssembler::DispatchToBytecode( TNode target_bytecode, TNode new_bytecode_offset) { if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(target_bytecode); } TNode target_code_entry = Load( DispatchTablePointer(), TimesSystemPointerSize(target_bytecode)); DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset); } void InterpreterAssembler::DispatchToBytecodeHandlerEntry( TNode handler_entry, TNode bytecode_offset) { // Propagate speculation poisoning. TNode poisoned_handler_entry = UncheckedCast(WordPoisonOnSpeculation(handler_entry)); TailCallBytecodeDispatch(InterpreterDispatchDescriptor{}, poisoned_handler_entry, GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer()); } void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { // Dispatching a wide bytecode requires treating the prefix // bytecode a base pointer into the dispatch table and dispatching // the bytecode that follows relative to this base. // // Indices 0-255 correspond to bytecodes with operand_scale == 0 // Indices 256-511 correspond to bytecodes with operand_scale == 1 // Indices 512-767 correspond to bytecodes with operand_scale == 2 DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); TNode next_bytecode_offset = Advance(1); TNode next_bytecode = LoadBytecode(next_bytecode_offset); if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(next_bytecode); } TNode base_index; switch (operand_scale) { case OperandScale::kDouble: base_index = IntPtrConstant(1 << kBitsPerByte); break; case OperandScale::kQuadruple: base_index = IntPtrConstant(2 << kBitsPerByte); break; default: UNREACHABLE(); } TNode target_index = IntPtrAdd(base_index, next_bytecode); TNode target_code_entry = Load( DispatchTablePointer(), TimesSystemPointerSize(target_index)); DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset); } void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { // TODO(rmcilroy): Investigate whether it is worth supporting self // optimization of primitive functions like FullCodegen. // Update profiling count by the number of bytes between the end of the // current bytecode and the start of the first one, to simulate backedge to // start of function. // // With headers and current offset, the bytecode array layout looks like: // // <---------- simulated backedge ---------- // | header | first bytecode | .... | return bytecode | // |<------ current offset -------> // ^ tagged bytecode array pointer // // UpdateInterruptBudget already handles adding the bytecode size to the // length of the back-edge, so we just have to correct for the non-zero offset // of the first bytecode. 
const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag; TNode profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), Int32Constant(kFirstBytecodeOffset)); UpdateInterruptBudget(profiling_weight, true); } TNode InterpreterAssembler::LoadOsrNestingLevel() { return LoadObjectField(BytecodeArrayTaggedPointer(), BytecodeArray::kOsrNestingLevelOffset); } void InterpreterAssembler::Abort(AbortReason abort_reason) { TNode abort_id = SmiConstant(abort_reason); CallRuntime(Runtime::kAbort, GetContext(), abort_id); } void InterpreterAssembler::AbortIfWordNotEqual(TNode lhs, TNode rhs, AbortReason abort_reason) { Label ok(this), abort(this, Label::kDeferred); Branch(WordEqual(lhs, rhs), &ok, &abort); BIND(&abort); Abort(abort_reason); Goto(&ok); BIND(&ok); } void InterpreterAssembler::MaybeDropFrames(TNode context) { TNode restart_fp_address = ExternalConstant(ExternalReference::debug_restart_fp_address(isolate())); TNode restart_fp = Load(restart_fp_address); TNode null = IntPtrConstant(0); Label ok(this), drop_frames(this); Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames); BIND(&drop_frames); // We don't expect this call to return since the frame dropper tears down // the stack and jumps into the function on the target frame to restart it. CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp); Abort(AbortReason::kUnexpectedReturnFromFrameDropper); Goto(&ok); BIND(&ok); } void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) { CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(), SmiTag(BytecodeOffset()), GetAccumulatorUnchecked()); } void InterpreterAssembler::TraceBytecodeDispatch(TNode target_bytecode) { TNode counters_table = ExternalConstant( ExternalReference::interpreter_dispatch_counters(isolate())); TNode source_bytecode_table_index = IntPtrConstant( static_cast(bytecode_) * (static_cast(Bytecode::kLast) + 1)); TNode counter_offset = TimesSystemPointerSize( IntPtrAdd(source_bytecode_table_index, target_bytecode)); TNode old_counter = Load(counters_table, counter_offset); Label counter_ok(this), counter_saturated(this, Label::kDeferred); TNode counter_reached_max = WordEqual( old_counter, IntPtrConstant(std::numeric_limits::max())); Branch(counter_reached_max, &counter_saturated, &counter_ok); BIND(&counter_ok); { TNode new_counter = IntPtrAdd(old_counter, IntPtrConstant(1)); StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table, counter_offset, new_counter); Goto(&counter_saturated); } BIND(&counter_saturated); } // static bool InterpreterAssembler::TargetSupportsUnalignedAccess() { #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 return false; #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC return true; #else #error "Unknown Architecture" #endif } void InterpreterAssembler::AbortIfRegisterCountInvalid( TNode parameters_and_registers, TNode formal_parameter_count, TNode register_count) { TNode array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers); Label ok(this), abort(this, Label::kDeferred); Branch(UintPtrLessThanOrEqual( IntPtrAdd(formal_parameter_count, register_count), array_size), &ok, &abort); BIND(&abort); Abort(AbortReason::kInvalidParametersAndRegistersInGenerator); Goto(&ok); BIND(&ok); } TNode InterpreterAssembler::ExportParametersAndRegisterFile( TNode array, const RegListNodePair& registers, TNode formal_parameter_count) { // Store the formal 
parameters (without receiver) followed by the // registers into the generator's internal parameters_and_registers field. TNode formal_parameter_count_intptr = Signed(ChangeUint32ToWord(formal_parameter_count)); TNode register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), RegisterLocation(Register(0)))); AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr, register_count); } { TVARIABLE(IntPtrT, var_index); var_index = IntPtrConstant(0); // Iterate over parameters and write them into the array. Label loop(this, &var_index), done_loop(this); TNode reg_base = IntPtrAdd( IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1), formal_parameter_count_intptr); Goto(&loop); BIND(&loop); { TNode index = var_index.value(); GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr), &done_loop); TNode reg_index = IntPtrSub(reg_base, index); TNode value = LoadRegister(reg_index); StoreFixedArrayElement(array, index, value); var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); } { // Iterate over register file and write values into array. // The mapping of register to array index must match that used in // BytecodeGraphBuilder::VisitResumeGenerator. TVARIABLE(IntPtrT, var_index); var_index = IntPtrConstant(0); Label loop(this, &var_index), done_loop(this); Goto(&loop); BIND(&loop); { TNode index = var_index.value(); GotoIfNot(UintPtrLessThan(index, register_count), &done_loop); TNode reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); TNode value = LoadRegister(reg_index); TNode array_index = IntPtrAdd(formal_parameter_count_intptr, index); StoreFixedArrayElement(array, array_index, value); var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); } return array; } TNode InterpreterAssembler::ImportRegisterFile( TNode array, const RegListNodePair& registers, TNode formal_parameter_count) { TNode formal_parameter_count_intptr = Signed(ChangeUint32ToWord(formal_parameter_count)); TNode register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), RegisterLocation(Register(0)))); AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr, register_count); } TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); // Iterate over array and write values into register file. Also erase the // array contents to not keep them alive artificially. 
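  // Each array slot is overwritten with the StaleRegister sentinel once its
  // value has been written back into the register file.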
Label loop(this, &var_index), done_loop(this); Goto(&loop); BIND(&loop); { TNode index = var_index.value(); GotoIfNot(UintPtrLessThan(index, register_count), &done_loop); TNode array_index = IntPtrAdd(formal_parameter_count_intptr, index); TNode value = LoadFixedArrayElement(array, array_index); TNode reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); StoreRegister(value, reg_index); StoreFixedArrayElement(array, array_index, StaleRegisterConstant()); var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); return array; } int InterpreterAssembler::CurrentBytecodeSize() const { return Bytecodes::Size(bytecode_, operand_scale_); } void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { TNode object = GetAccumulator(); TNode context = GetContext(); TVARIABLE(Smi, var_type_feedback); TVARIABLE(Numeric, var_result); Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this), if_objectisother(this, Label::kDeferred); GotoIf(TaggedIsSmi(object), &if_objectissmi); Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother); BIND(&if_objectissmi); { var_result = CAST(object); var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); Goto(&if_done); } BIND(&if_objectisheapnumber); { var_result = CAST(object); var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); Goto(&if_done); } BIND(&if_objectisother); { auto builtin = Builtins::kNonNumberToNumber; if (mode == Object::Conversion::kToNumeric) { builtin = Builtins::kNonNumberToNumeric; // Special case for collecting BigInt feedback. Label not_bigint(this); GotoIfNot(IsBigInt(CAST(object)), ¬_bigint); { var_result = CAST(object); var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); Goto(&if_done); } BIND(¬_bigint); } // Convert {object} by calling out to the appropriate builtin. var_result = CAST(CallBuiltin(builtin, context, object)); var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny); Goto(&if_done); } BIND(&if_done); // Record the type feedback collected for {object}. TNode slot_index = BytecodeOperandIdx(0); TNode maybe_feedback_vector = LoadFeedbackVector(); UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index); SetAccumulator(var_result.value()); Dispatch(); } } // namespace interpreter } // namespace internal } // namespace v8