aboutsummaryrefslogtreecommitdiff
path: root/deps/v8/src/interpreter
diff options
context:
space:
mode:
authorAli Ijaz Sheikh <ofrobots@google.com>2016-03-01 08:58:05 -0800
committerAli Sheikh <ofrobots@lemonhope.roam.corp.google.com>2016-03-03 20:35:20 -0800
commit069e02ab47656b3efd1b6829c65856b2e1c2d1db (patch)
treeeb643e0a2e88fd64bb9fc927423458d2ae96c2db /deps/v8/src/interpreter
parent8938355398c79f583a468284b768652d12ba9bc9 (diff)
downloadandroid-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.tar.gz
android-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.tar.bz2
android-node-v8-069e02ab47656b3efd1b6829c65856b2e1c2d1db.zip
deps: upgrade to V8 4.9.385.18
Pick up the current branch head for V8 4.9 https://github.com/v8/v8/commit/1ecba0f PR-URL: https://github.com/nodejs/node/pull/4722 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Michaƫl Zasso <mic.besace@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc675
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h160
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc41
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc705
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h30
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc72
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h49
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc124
-rw-r--r--deps/v8/src/interpreter/bytecodes.h112
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc174
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h97
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc47
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h37
-rw-r--r--deps/v8/src/interpreter/interpreter.cc515
-rw-r--r--deps/v8/src/interpreter/interpreter.h13
16 files changed, 2112 insertions, 745 deletions
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index f2f5c07251..1b15fc6668 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -8,16 +8,72 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayBuilder::PreviousBytecodeHelper {
+ public:
+ explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
+ : array_builder_(array_builder),
+ previous_bytecode_start_(array_builder_.last_bytecode_start_) {
+ // This helper is expected to be instantiated only when the last bytecode is
+ // in the same basic block.
+ DCHECK(array_builder_.LastBytecodeInSameBlock());
+ }
+
+ // Returns the previous bytecode in the same basic block.
+ MUST_USE_RESULT Bytecode GetBytecode() const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ return Bytecodes::FromByte(
+ array_builder_.bytecodes()->at(previous_bytecode_start_));
+ }
+
+ // Returns the operand at operand_index for the previous bytecode in the
+ // same basic block.
+ MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ Bytecode bytecode = GetBytecode();
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
+ size_t operand_offset =
+ previous_bytecode_start_ +
+ Bytecodes::GetOperandOffset(bytecode, operand_index);
+ OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
+ switch (size) {
+ default:
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ return static_cast<uint32_t>(
+ array_builder_.bytecodes()->at(operand_offset));
+ case OperandSize::kShort:
+ uint16_t operand =
+ (array_builder_.bytecodes()->at(operand_offset) << 8) +
+ array_builder_.bytecodes()->at(operand_offset + 1);
+ return static_cast<uint32_t>(operand);
+ }
+ }
+
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+ return array_builder_.constant_array_builder()->At(
+ GetOperand(operand_index));
+ }
+
+ private:
+ const BytecodeArrayBuilder& array_builder_;
+ size_t previous_bytecode_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
+};
+
+
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
bytecodes_(zone),
bytecode_generated_(false),
+ constant_array_builder_(isolate, zone),
last_block_end_(0),
last_bytecode_start_(~0),
exit_seen_in_block_(false),
- constants_map_(isolate->heap(), zone),
- constants_(zone),
+ unbound_jumps_(0),
parameter_count_(-1),
local_register_count_(-1),
context_register_count_(-1),
@@ -25,6 +81,9 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
free_temporaries_(zone) {}
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
+
+
void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
local_register_count_ = number_of_locals;
DCHECK_LE(context_register_count_, 0);
@@ -85,21 +144,14 @@ bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
-
EnsureReturn();
int bytecode_size = static_cast<int>(bytecodes_.size());
int register_count = fixed_register_count() + temporary_register_count_;
int frame_size = register_count * kPointerSize;
-
Factory* factory = isolate_->factory();
- int constants_count = static_cast<int>(constants_.size());
Handle<FixedArray> constant_pool =
- factory->NewFixedArray(constants_count, TENURED);
- for (int i = 0; i < constants_count; i++) {
- constant_pool->set(i, *constants_[i]);
- }
-
+ constant_array_builder()->ToFixedArray(factory);
Handle<BytecodeArray> output =
factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
parameter_count(), constant_pool);
@@ -137,6 +189,14 @@ void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3) {
+ uint32_t operands[] = {operand0, operand1, operand2, operand3};
+ Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2) {
uint32_t operands[] = {operand0, operand1, operand2};
Output(bytecode, operands);
@@ -269,11 +329,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
+ if (value) {
+ LoadTrue();
+ } else {
+ LoadFalse();
+ }
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- // TODO(oth): Avoid loading the accumulator with the register if the
- // previous bytecode stored the accumulator with the same register.
- Output(Bytecode::kLdar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kLdar, reg.ToOperand());
+ }
return *this;
}
@@ -282,17 +352,47 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
// TODO(oth): Avoid storing the accumulator in the register if the
// previous bytecode loaded the accumulator with the same register.
+ //
+ // TODO(oth): If the previous bytecode is a MOV into this register,
+ // the previous instruction can be removed. The logic for determining
+ // these redundant MOVs appears complex.
Output(Bytecode::kStar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kStar, reg.ToOperand());
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
+ Register to) {
+ DCHECK(from != to);
+ Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
+ Register reg1) {
+ DCHECK(reg0 != reg1);
+ if (FitsInReg8Operand(reg0)) {
+ Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
+ } else if (FitsInReg8Operand(reg1)) {
+ Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+ } else {
+ Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
- size_t name_index, int feedback_slot, LanguageMode language_mode,
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode,
TypeofMode typeof_mode) {
// TODO(rmcilroy): Potentially store language and typeof information in an
// operand rather than having extra bytecodes.
Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -308,8 +408,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
- size_t name_index, int feedback_slot, LanguageMode language_mode) {
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -330,6 +431,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
if (FitsInIdx8Operand(slot_index)) {
Output(Bytecode::kLdaContextSlot, context.ToOperand(),
static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
} else {
UNIMPLEMENTED();
}
@@ -343,6 +447,43 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
if (FitsInIdx8Operand(slot_index)) {
Output(Bytecode::kStaContextSlot, context.ToOperand(),
static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
+ const Handle<String> name, TypeofMode typeof_mode) {
+ Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+ ? Bytecode::kLdaLookupSlotInsideTypeof
+ : Bytecode::kLdaLookupSlot;
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
+ const Handle<String> name, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
} else {
UNIMPLEMENTED();
}
@@ -351,9 +492,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, size_t name_index, int feedback_slot,
+ Register object, const Handle<String> name, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForLoadIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -385,9 +527,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, size_t name_index, int feedback_slot,
+ Register object, const Handle<String> name, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -421,9 +564,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
- PretenureFlag tenured) {
+ Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
+ size_t entry = GetConstantPoolEntry(shared_info);
DCHECK(FitsInImm8Operand(tenured));
- Output(Bytecode::kCreateClosure, static_cast<uint8_t>(tenured));
+ if (FitsInIdx8Operand(entry)) {
+ Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
+ static_cast<uint8_t>(tenured));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
+ static_cast<uint8_t>(tenured));
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
@@ -440,10 +592,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
- int literal_index, Register flags) {
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(literal_index),
- flags.ToOperand());
+ Handle<String> pattern, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t pattern_entry = GetConstantPoolEntry(pattern);
+ if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteralWide,
+ static_cast<uint16_t>(pattern_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -452,11 +611,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
- int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bytes.
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateArrayLiteral, static_cast<uint8_t>(literal_index),
- static_cast<uint8_t>(flags));
+ Handle<FixedArray> constant_elements, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteral,
+ static_cast<uint8_t>(constant_elements_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteralWide,
+ static_cast<uint16_t>(constant_elements_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -465,11 +632,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bytes.
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateObjectLiteral, static_cast<uint8_t>(literal_index),
- static_cast<uint8_t>(flags));
+ Handle<FixedArray> constant_properties, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteral,
+ static_cast<uint8_t>(constant_properties_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteralWide,
+ static_cast<uint16_t>(constant_properties_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -491,14 +666,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
bool BytecodeArrayBuilder::NeedToBooleanCast() {
if (!LastBytecodeInSameBlock()) {
- // If the previous bytecode was from a different block return false.
return true;
}
-
- // If the previous bytecode puts a boolean in the accumulator return true.
- switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
- case Bytecode::kToBoolean:
- UNREACHABLE();
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ // If the previous bytecode puts a boolean in the accumulator return true.
case Bytecode::kLdaTrue:
case Bytecode::kLdaFalse:
case Bytecode::kLogicalNot:
@@ -520,16 +692,6 @@ bool BytecodeArrayBuilder::NeedToBooleanCast() {
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToBoolean() {
- // If the previous bytecode puts a boolean in the accumulator
- // there is no need to emit an instruction.
- if (NeedToBooleanCast()) {
- Output(Bytecode::kToBoolean);
- }
- return *this;
-}
-
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
Output(Bytecode::kToObject);
return *this;
@@ -537,6 +699,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
+ if (LastBytecodeInSameBlock()) {
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ case Bytecode::kToName:
+ case Bytecode::kTypeOf:
+ return *this;
+ case Bytecode::kLdaConstantWide:
+ case Bytecode::kLdaConstant: {
+ Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
+ if (object->IsName()) return *this;
+ break;
+ }
+ default:
+ break;
+ }
+ }
Output(Bytecode::kToName);
return *this;
}
@@ -594,42 +772,32 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfUndefinedConstant;
default:
UNREACHABLE();
- return Bytecode::kJumpConstant;
+ return static_cast<Bytecode>(-1);
}
}
-void BytecodeArrayBuilder::PatchJump(
- const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- int delta = static_cast<int>(jump_target - jump_location);
-
- DCHECK(Bytecodes::IsJump(jump_bytecode));
- DCHECK_EQ(Bytecodes::Size(jump_bytecode), 2);
- DCHECK_NE(delta, 0);
-
- if (FitsInImm8Operand(delta)) {
- // Just update the operand
- jump_location++;
- *jump_location = static_cast<uint8_t>(delta);
- } else {
- // Update the jump type and operand
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdx8Operand(entry)) {
- jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location++ = Bytecodes::ToByte(jump_bytecode);
- *jump_location = static_cast<uint8_t>(entry);
- } else {
- // TODO(oth): OutputJump should reserve a constant pool entry
- // when jump is written. The reservation should be used here if
- // needed, or cancelled if not. This is due to the patch needing
- // to match the size of the code it's replacing. In future,
- // there will probably be a jump with 32-bit operand for cases
- // when constant pool is full, but that needs to be emitted in
- // OutputJump too.
- UNIMPLEMENTED();
- }
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
+ Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ return Bytecode::kJumpConstantWide;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfTrueConstantWide;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfFalseConstantWide;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstantWide;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstantWide;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstantWide;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstantWide;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
}
}
@@ -652,6 +820,66 @@ Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
}
+void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ DCHECK_EQ(*operand_location, 0);
+ if (FitsInImm8Operand(delta)) {
+ // The jump fits within the range of an Imm8 operand, so cancel
+ // the reservation and jump directly.
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+ *operand_location = static_cast<uint8_t>(delta);
+ } else {
+ // The jump does not fit within the range of an Imm8 operand, so
+ // commit reservation putting the offset into the constant pool,
+ // and update the jump instruction and operand.
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx8Operand(entry));
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ *jump_location = Bytecodes::ToByte(jump_bytecode);
+ *operand_location = static_cast<uint8_t>(entry);
+ }
+}
+
+
+void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx16Operand(entry));
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
+ *operand_location++ = operand_bytes[0];
+ *operand_location = operand_bytes[1];
+}
+
+
+void BytecodeArrayBuilder::PatchJump(
+ const ZoneVector<uint8_t>::iterator& jump_target,
+ const ZoneVector<uint8_t>::iterator& jump_location) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ int delta = static_cast<int>(jump_target - jump_location);
+ DCHECK(Bytecodes::IsJump(jump_bytecode));
+ switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
+ case OperandSize::kByte:
+ PatchIndirectJumpWith8BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kShort:
+ PatchIndirectJumpWith16BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ unbound_jumps_--;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
// Don't emit dead code.
@@ -663,29 +891,48 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
}
- int delta;
if (label->is_bound()) {
// Label has been bound already so this is a backwards jump.
CHECK_GE(bytecodes()->size(), label->offset());
CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
size_t abs_delta = bytecodes()->size() - label->offset();
- delta = -static_cast<int>(abs_delta);
- } else {
- // Label has not yet been bound so this is a forward reference
- // that will be patched when the label is bound.
- label->set_referrer(bytecodes()->size());
- delta = 0;
- }
+ int delta = -static_cast<int>(abs_delta);
- if (FitsInImm8Operand(delta)) {
- Output(jump_bytecode, static_cast<uint8_t>(delta));
- } else {
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdx8Operand(entry)) {
- Output(GetJumpWithConstantOperand(jump_bytecode),
- static_cast<uint8_t>(entry));
+ if (FitsInImm8Operand(delta)) {
+ Output(jump_bytecode, static_cast<uint8_t>(delta));
} else {
- UNIMPLEMENTED();
+ size_t entry =
+ GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+ if (FitsInIdx8Operand(entry)) {
+ Output(GetJumpWithConstantOperand(jump_bytecode),
+ static_cast<uint8_t>(entry));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(GetJumpWithConstantWideOperand(jump_bytecode),
+ static_cast<uint16_t>(entry));
+ } else {
+ UNREACHABLE();
+ }
+ }
+ } else {
+ // The label has not yet been bound so this is a forward reference
+ // that will be patched when the label is bound. We create a
+ // reservation in the constant pool so the jump can be patched
+ // when the label is bound. The reservation means the maximum size
+ // of the operand for the constant is known and the jump can
+ // be emitted into the bytecode stream with space for the operand.
+ label->set_referrer(bytecodes()->size());
+ unbound_jumps_++;
+ OperandSize reserved_operand_size =
+ constant_array_builder()->CreateReservedEntry();
+ switch (reserved_operand_size) {
+ case OperandSize::kByte:
+ Output(jump_bytecode, 0);
+ break;
+ case OperandSize::kShort:
+ Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
}
}
LeaveBasicBlock();
@@ -733,21 +980,33 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(Register receiver) {
- Output(Bytecode::kForInPrepare, receiver.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
+ Register cache_type, Register cache_array, Register cache_length) {
+ Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
+ cache_array.ToOperand(), cache_length.ToOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register for_in_state,
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
+ Register cache_length) {
+ Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
+ Register cache_type,
+ Register cache_array,
Register index) {
- Output(Bytecode::kForInNext, for_in_state.ToOperand(), index.ToOperand());
+ Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
+ cache_array.ToOperand(), index.ToOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register for_in_state) {
- Output(Bytecode::kForInDone, for_in_state.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
+ Output(Bytecode::kForInStep, index.ToOperand());
return *this;
}
@@ -768,10 +1027,17 @@ void BytecodeArrayBuilder::EnsureReturn() {
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
Register receiver,
- size_t arg_count) {
- if (FitsInIdx8Operand(arg_count)) {
+ size_t arg_count,
+ int feedback_slot) {
+ if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
- static_cast<uint8_t>(arg_count));
+ static_cast<uint8_t>(arg_count),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(arg_count) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
+ static_cast<uint16_t>(arg_count),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -795,6 +1061,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+ DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(FitsInIdx16Operand(function_id));
DCHECK(FitsInIdx8Operand(arg_count));
if (!first_arg.is_valid()) {
@@ -807,6 +1074,23 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+ Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
+ Register first_return) {
+ DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
+ DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+ first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
+ first_return.ToOperand());
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
Register receiver,
size_t arg_count) {
@@ -825,23 +1109,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
}
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
- // These constants shouldn't be added to the constant pool, the should use
- // specialzed bytecodes instead.
- DCHECK(!object.is_identical_to(isolate_->factory()->undefined_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->null_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->the_hole_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->true_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->false_value()));
+BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
+ Output(Bytecode::kDeleteLookupSlot);
+ return *this;
+}
- size_t* entry = constants_map_.Find(object);
- if (!entry) {
- entry = constants_map_.Get(object);
- *entry = constants_.size();
- constants_.push_back(object);
- }
- DCHECK(constants_[*entry].is_identical_to(object));
- return *entry;
+
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
+ return constant_array_builder()->Insert(object);
}
@@ -858,6 +1133,28 @@ int BytecodeArrayBuilder::BorrowTemporaryRegister() {
}
+int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
+ int end_index) {
+ auto index = free_temporaries_.lower_bound(start_index);
+ if (index == free_temporaries_.begin()) {
+ // If start_index is the first free register, check for a register
+ // greater than end_index.
+ index = free_temporaries_.upper_bound(end_index);
+ if (index == free_temporaries_.end()) {
+ temporary_register_count_ += 1;
+ return last_temporary_register().index();
+ }
+ } else {
+ // If there is a free register < start_index
+ index--;
+ }
+
+ int retval = *index;
+ free_temporaries_.erase(index);
+ return retval;
+}
+
+
void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
free_temporaries_.erase(reg_index);
@@ -917,12 +1214,28 @@ bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
}
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
+ if (reg.is_function_context() || reg.is_function_closure() ||
+ reg.is_new_target()) {
+ return true;
+ } else if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count_);
+ return parameter_index >= 0 && parameter_index < parameter_count_;
+ } else if (reg.index() < fixed_register_count()) {
+ return true;
+ } else {
+ return TemporaryRegisterIsLive(reg);
+ }
+}
+
+
bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
uint32_t operand_value) const {
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
+ case OperandType::kCount16:
case OperandType::kIdx16:
return static_cast<uint16_t>(operand_value) == operand_value;
case OperandType::kCount8:
@@ -934,30 +1247,47 @@ bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
return true;
}
// Fall-through to kReg8 case.
- case OperandType::kReg8: {
- Register reg = Register::FromOperand(static_cast<uint8_t>(operand_value));
- if (reg.is_function_context() || reg.is_function_closure()) {
- return true;
- } else if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count_);
- return parameter_index >= 0 && parameter_index < parameter_count_;
- } else if (reg.index() < fixed_register_count()) {
- return true;
- } else {
- return TemporaryRegisterIsLive(reg);
- }
+ case OperandType::kReg8:
+ return RegisterIsValid(
+ Register::FromOperand(static_cast<uint8_t>(operand_value)));
+ case OperandType::kRegPair8: {
+ Register reg0 =
+ Register::FromOperand(static_cast<uint8_t>(operand_value));
+ Register reg1 = Register(reg0.index() + 1);
+ return RegisterIsValid(reg0) && RegisterIsValid(reg1);
}
+ case OperandType::kReg16:
+ if (bytecode != Bytecode::kExchange &&
+ bytecode != Bytecode::kExchangeWide) {
+ return false;
+ }
+ return RegisterIsValid(
+ Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
}
UNREACHABLE();
return false;
}
+
bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
return last_bytecode_start_ < bytecodes()->size() &&
last_bytecode_start_ >= last_block_end_;
}
+bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
+ if (LastBytecodeInSameBlock()) {
+ PreviousBytecodeHelper previous_bytecode(*this);
+ Bytecode bytecode = previous_bytecode.GetBytecode();
+ if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
+ (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
// static
Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
switch (op) {
@@ -1065,6 +1395,14 @@ Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
return Bytecode::kStaGlobalSloppyWide;
case Bytecode::kStaGlobalStrict:
return Bytecode::kStaGlobalStrictWide;
+ case Bytecode::kLdaLookupSlot:
+ return Bytecode::kLdaLookupSlotWide;
+ case Bytecode::kLdaLookupSlotInsideTypeof:
+ return Bytecode::kLdaLookupSlotInsideTypeofWide;
+ case Bytecode::kStaLookupSlotStrict:
+ return Bytecode::kStaLookupSlotStrictWide;
+ case Bytecode::kStaLookupSlotSloppy:
+ return Bytecode::kStaLookupSlotSloppyWide;
default:
UNREACHABLE();
return static_cast<Bytecode>(-1);
@@ -1177,6 +1515,23 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStaLookupSlotSloppy;
+ case STRICT:
+ return Bytecode::kStaLookupSlotStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
CreateArgumentsType type) {
switch (type) {
@@ -1221,7 +1576,7 @@ bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
// static
bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
- return kMinInt8 <= value && value < kMaxInt8;
+ return kMinInt8 <= value && value <= kMaxInt8;
}
@@ -1237,53 +1592,15 @@ bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
}
-TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
- : builder_(builder),
- allocated_(builder->zone()),
- next_consecutive_register_(-1),
- next_consecutive_count_(-1) {}
-
-
-TemporaryRegisterScope::~TemporaryRegisterScope() {
- for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
- builder_->ReturnTemporaryRegister(*i);
- }
- allocated_.clear();
-}
-
-
-Register TemporaryRegisterScope::NewRegister() {
- int allocated = builder_->BorrowTemporaryRegister();
- allocated_.push_back(allocated);
- return Register(allocated);
-}
-
-
-bool TemporaryRegisterScope::RegisterIsAllocatedInThisScope(
- Register reg) const {
- for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
- if (*i == reg.index()) return true;
- }
- return false;
-}
-
-
-void TemporaryRegisterScope::PrepareForConsecutiveAllocations(size_t count) {
- if (static_cast<int>(count) > next_consecutive_count_) {
- next_consecutive_register_ =
- builder_->PrepareForConsecutiveTemporaryRegisters(count);
- next_consecutive_count_ = static_cast<int>(count);
- }
+// static
+bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
+ return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
}
-Register TemporaryRegisterScope::NextConsecutiveRegister() {
- DCHECK_GE(next_consecutive_register_, 0);
- DCHECK_GT(next_consecutive_count_, 0);
- builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
- allocated_.push_back(next_consecutive_register_);
- next_consecutive_count_--;
- return Register(next_consecutive_register_++);
+// static
+bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
+ return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index b766ccd4a6..7c23dc3f22 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -5,12 +5,9 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
-#include <vector>
-
-#include "src/ast.h"
-#include "src/identity-map.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone.h"
+#include "src/interpreter/constant-array-builder.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -21,15 +18,18 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
+class ConstantArrayBuilder;
class Register;
// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
// when rest parameters implementation has settled down.
enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
-class BytecodeArrayBuilder {
+class BytecodeArrayBuilder final {
public:
BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+ ~BytecodeArrayBuilder();
+
Handle<BytecodeArray> ToBytecodeArray();
// Set the number of parameters expected by function.
@@ -68,9 +68,6 @@ class BytecodeArrayBuilder {
// Return true if the register |reg| represents a temporary register.
bool RegisterIsTemporary(Register reg) const;
- // Gets a constant pool entry for the |object|.
- size_t GetConstantPoolEntry(Handle<Object> object);
-
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
@@ -79,12 +76,14 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadTheHole();
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
+ BytecodeArrayBuilder& LoadBooleanConstant(bool value);
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(size_t name_index, int feedback_slot,
+ BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
LanguageMode language_mode,
TypeofMode typeof_mode);
- BytecodeArrayBuilder& StoreGlobal(size_t name_index, int feedback_slot,
+ BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
+ int feedback_slot,
LanguageMode language_mode);
// Load the object at |slot_index| in |context| into the accumulator.
@@ -97,8 +96,13 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
+ // Register-register transfer.
+ BytecodeArrayBuilder& MoveRegister(Register from, Register to);
+ BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
+
// Named load property.
- BytecodeArrayBuilder& LoadNamedProperty(Register object, size_t name_index,
+ BytecodeArrayBuilder& LoadNamedProperty(Register object,
+ const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
// Keyed load property. The key should be in the accumulator.
@@ -106,23 +110,36 @@ class BytecodeArrayBuilder {
LanguageMode language_mode);
// Store properties. The value to be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object, size_t name_index,
+ BytecodeArrayBuilder& StoreNamedProperty(Register object,
+ const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
- // Create a new closure for the SharedFunctionInfo in the accumulator.
- BytecodeArrayBuilder& CreateClosure(PretenureFlag tenured);
+ // Lookup the variable with |name|.
+ BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+ TypeofMode typeof_mode);
+
+ // Store value in the accumulator into the variable with |name|.
+ BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+ LanguageMode language_mode);
+
+ // Create a new closure for the SharedFunctionInfo.
+ BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ PretenureFlag tenured);
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
// Literals creation. Constant elements should be in the accumulator.
- BytecodeArrayBuilder& CreateRegExpLiteral(int literal_index, Register flags);
- BytecodeArrayBuilder& CreateArrayLiteral(int literal_index, int flags);
- BytecodeArrayBuilder& CreateObjectLiteral(int literal_index, int flags);
+ BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateObjectLiteral(
+ Handle<FixedArray> constant_properties, int literal_index, int flags);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -136,7 +153,7 @@ class BytecodeArrayBuilder {
// arguments should be in registers <receiver + 1> to
// <receiver + 1 + arg_count>.
BytecodeArrayBuilder& Call(Register callable, Register receiver,
- size_t arg_count);
+ size_t arg_count, int feedback_slot);
// Call the new operator. The |constructor| register is followed by
// |arg_count| consecutive registers containing arguments to be
@@ -150,6 +167,14 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
Register first_arg, size_t arg_count);
+ // Call the runtime function with |function_id| that returns a pair of values.
+ // The first argument should be in |first_arg| and all subsequent arguments
+ // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+ // return values will be returned in <first_return> and <first_return + 1>.
+ BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+ Register first_arg, size_t arg_count,
+ Register first_return);
+
// Call the JS runtime function with |context_index|. The the receiver should
// be in |receiver| and all subsequent arguments should be in registers
// <receiver + 1> to <receiver + 1 + arg_count>.
@@ -170,6 +195,7 @@ class BytecodeArrayBuilder {
// Deletes property from an object. This expects that accumulator contains
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+ BytecodeArrayBuilder& DeleteLookupSlot();
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
@@ -195,9 +221,12 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& Return();
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register receiver);
- BytecodeArrayBuilder& ForInNext(Register for_in_state, Register index);
- BytecodeArrayBuilder& ForInDone(Register for_in_state);
+ BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
+ Register cache_length);
+ BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+ BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
+ Register cache_array, Register index);
+ BytecodeArrayBuilder& ForInStep(Register index);
// Accessors
Zone* zone() const { return zone_; }
@@ -206,6 +235,12 @@ class BytecodeArrayBuilder {
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
Isolate* isolate() const { return isolate_; }
+ ConstantArrayBuilder* constant_array_builder() {
+ return &constant_array_builder_;
+ }
+ const ConstantArrayBuilder* constant_array_builder() const {
+ return &constant_array_builder_;
+ }
static Bytecode BytecodeForBinaryOperation(Token::Value op);
static Bytecode BytecodeForCountOperation(Token::Value op);
@@ -218,6 +253,7 @@ class BytecodeArrayBuilder {
static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
TypeofMode typeof_mode);
static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
static Bytecode BytecodeForDelete(LanguageMode language_mode);
@@ -226,12 +262,20 @@ class BytecodeArrayBuilder {
static bool FitsInImm8Operand(int value);
static bool FitsInIdx16Operand(int value);
static bool FitsInIdx16Operand(size_t value);
+ static bool FitsInReg8Operand(Register value);
+ static bool FitsInReg16Operand(Register value);
- static Bytecode GetJumpWithConstantOperand(Bytecode jump_with_smi8_operand);
- static Bytecode GetJumpWithToBoolean(Bytecode jump);
+ static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
+
+ Register MapRegister(Register reg);
+ Register MapRegisters(Register reg, Register args_base, int args_length = 1);
template <size_t N>
- INLINE(void Output(Bytecode bytecode, uint32_t(&oprands)[N]));
+ INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3);
void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2);
void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
@@ -241,7 +285,11 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location);
+ const ZoneVector<uint8_t>::iterator& jump_location);
+ void PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
void LeaveBasicBlock();
void EnsureReturn();
@@ -251,8 +299,13 @@ class BytecodeArrayBuilder {
bool LastBytecodeInSameBlock() const;
bool NeedToBooleanCast();
+ bool IsRegisterInAccumulator(Register reg);
+ bool RegisterIsValid(Register reg) const;
+
+ // Temporary register management.
int BorrowTemporaryRegister();
+ int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
void ReturnTemporaryRegister(int reg_index);
int PrepareForConsecutiveTemporaryRegisters(size_t count);
void BorrowConsecutiveTemporaryRegister(int reg_index);
@@ -261,25 +314,28 @@ class BytecodeArrayBuilder {
Register first_temporary_register() const;
Register last_temporary_register() const;
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
+
Isolate* isolate_;
Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
+ ConstantArrayBuilder constant_array_builder_;
size_t last_block_end_;
size_t last_bytecode_start_;
bool exit_seen_in_block_;
-
- IdentityMap<size_t> constants_map_;
- ZoneVector<Handle<Object>> constants_;
+ int unbound_jumps_;
int parameter_count_;
int local_register_count_;
int context_register_count_;
int temporary_register_count_;
-
ZoneSet<int> free_temporaries_;
- friend class TemporaryRegisterScope;
+ class PreviousBytecodeHelper;
+ friend class BytecodeRegisterAllocator;
+
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
@@ -292,22 +348,24 @@ class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
- INLINE(bool is_bound() const) { return bound_; }
+ bool is_bound() const { return bound_; }
+ size_t offset() const { return offset_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
- INLINE(void bind_to(size_t offset)) {
+ void bind_to(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset);
offset_ = offset;
bound_ = true;
}
- INLINE(void set_referrer(size_t offset)) {
+
+ void set_referrer(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
- INLINE(size_t offset() const) { return offset_; }
- INLINE(bool is_forward_target() const) {
+
+ bool is_forward_target() const {
return offset() != kInvalidOffset && !is_bound();
}
@@ -322,36 +380,6 @@ class BytecodeLabel final {
friend class BytecodeArrayBuilder;
};
-
-// A stack-allocated class than allows the instantiator to allocate
-// temporary registers that are cleaned up when scope is closed.
-// TODO(oth): Deprecate TemporaryRegisterScope use. Code should be
-// using result scopes as far as possible.
-class TemporaryRegisterScope {
- public:
- explicit TemporaryRegisterScope(BytecodeArrayBuilder* builder);
- ~TemporaryRegisterScope();
- Register NewRegister();
-
- void PrepareForConsecutiveAllocations(size_t count);
- Register NextConsecutiveRegister();
-
- bool RegisterIsAllocatedInThisScope(Register reg) const;
-
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
-
- BytecodeArrayBuilder* builder_;
- const TemporaryRegisterScope* outer_;
- ZoneVector<int> allocated_;
- int next_consecutive_register_;
- int next_consecutive_count_;
-
- DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterScope);
-};
-
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index b84215660e..d09d72f01a 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -32,6 +32,11 @@ Bytecode BytecodeArrayIterator::current_bytecode() const {
}
+int BytecodeArrayIterator::current_bytecode_size() const {
+ return Bytecodes::Size(current_bytecode());
+}
+
+
uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
@@ -60,17 +65,21 @@ int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
- uint32_t operand = GetRawOperand(operand_index, OperandType::kCount8);
+ OperandSize size =
+ Bytecodes::GetOperandSize(current_bytecode(), operand_index);
+ OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
+ : OperandType::kCount16;
+ uint32_t operand = GetRawOperand(operand_index, type);
return static_cast<int>(operand);
}
int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- OperandSize size =
- Bytecodes::GetOperandSize(current_bytecode(), operand_index);
- OperandType type =
- (size == OperandSize::kByte) ? OperandType::kIdx8 : OperandType::kIdx16;
- uint32_t operand = GetRawOperand(operand_index, type);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIdx8 ||
+ operand_type == OperandType::kIdx16);
+ uint32_t operand = GetRawOperand(operand_index, operand_type);
return static_cast<int>(operand);
}
@@ -79,7 +88,9 @@ Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
DCHECK(operand_type == OperandType::kReg8 ||
- operand_type == OperandType::kMaybeReg8);
+ operand_type == OperandType::kRegPair8 ||
+ operand_type == OperandType::kMaybeReg8 ||
+ operand_type == OperandType::kReg16);
uint32_t operand = GetRawOperand(operand_index, operand_type);
return Register::FromOperand(operand);
}
@@ -91,6 +102,22 @@ Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
return FixedArray::get(constants, GetIndexOperand(operand_index));
}
+
+int BytecodeArrayIterator::GetJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetImmediateOperand(0);
+ return current_offset() + relative_offset;
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
+ interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ return current_offset() + smi->value();
+ } else {
+ UNREACHABLE();
+ return kMinInt;
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 31e237f098..e67fa974bd 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -20,6 +20,7 @@ class BytecodeArrayIterator {
void Advance();
bool done() const;
Bytecode current_bytecode() const;
+ int current_bytecode_size() const;
int current_offset() const { return bytecode_offset_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
@@ -35,6 +36,11 @@ class BytecodeArrayIterator {
// typed versions above which cast the return to an appropriate type.
uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
+ // Returns the absolute offset of the branch target at the current
+ // bytecode. It is an error to call this method if the bytecode is
+ // not for a jump or conditional jump.
+ int GetJumpTargetOffset() const;
+
private:
Handle<BytecodeArray> bytecode_array_;
int bytecode_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 02061a7514..959e155149 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,12 +4,13 @@
#include "src/interpreter/bytecode-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-#include "src/token.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -104,65 +105,65 @@ class BytecodeGenerator::ControlScope BASE_EMBEDDED {
};
-// Scoped class for enabling 'break' and 'continue' in iteration
-// constructs, e.g. do...while, while..., for...
-class BytecodeGenerator::ControlScopeForIteration
+// Scoped class for enabling break inside blocks and switch blocks.
+class BytecodeGenerator::ControlScopeForBreakable final
: public BytecodeGenerator::ControlScope {
public:
- ControlScopeForIteration(BytecodeGenerator* generator,
- IterationStatement* statement,
- LoopBuilder* loop_builder)
+ ControlScopeForBreakable(BytecodeGenerator* generator,
+ BreakableStatement* statement,
+ BreakableControlFlowBuilder* control_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {}
+ control_builder_(control_builder) {}
protected:
virtual bool Execute(Command command, Statement* statement) {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
- loop_builder_->Break();
+ control_builder_->Break();
return true;
case CMD_CONTINUE:
- loop_builder_->Continue();
- return true;
+ break;
}
return false;
}
private:
Statement* statement_;
- LoopBuilder* loop_builder_;
+ BreakableControlFlowBuilder* control_builder_;
};
-// Scoped class for enabling 'break' in switch statements.
-class BytecodeGenerator::ControlScopeForSwitch
+// Scoped class for enabling 'break' and 'continue' in iteration
+// constructs, e.g. do...while, while..., for...
+class BytecodeGenerator::ControlScopeForIteration final
: public BytecodeGenerator::ControlScope {
public:
- ControlScopeForSwitch(BytecodeGenerator* generator,
- SwitchStatement* statement,
- SwitchBuilder* switch_builder)
+ ControlScopeForIteration(BytecodeGenerator* generator,
+ IterationStatement* statement,
+ LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- switch_builder_(switch_builder) {}
+ loop_builder_(loop_builder) {}
protected:
virtual bool Execute(Command command, Statement* statement) {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
- switch_builder_->Break();
+ loop_builder_->Break();
return true;
case CMD_CONTINUE:
- break;
+ loop_builder_->Continue();
+ return true;
}
return false;
}
private:
Statement* statement_;
- SwitchBuilder* switch_builder_;
+ LoopBuilder* loop_builder_;
};
@@ -177,6 +178,63 @@ void BytecodeGenerator::ControlScope::PerformCommand(Command command,
}
+class BytecodeGenerator::RegisterAllocationScope {
+ public:
+ explicit RegisterAllocationScope(BytecodeGenerator* generator)
+ : generator_(generator),
+ outer_(generator->register_allocator()),
+ allocator_(builder()) {
+ generator_->set_register_allocator(this);
+ }
+
+ virtual ~RegisterAllocationScope() {
+ generator_->set_register_allocator(outer_);
+ }
+
+ Register NewRegister() {
+ RegisterAllocationScope* current_scope = generator()->register_allocator();
+ if ((current_scope == this) ||
+ (current_scope->outer() == this &&
+ !current_scope->allocator_.HasConsecutiveAllocations())) {
+ // Regular case - Allocating registers in current or outer context.
+ // VisitForRegisterValue allocates register in outer context.
+ return allocator_.NewRegister();
+ } else {
+ // If it is required to allocate a register other than current or outer
+ // scopes, allocate a new temporary register. It might be expensive to
+ // walk the full context chain and compute the list of consecutive
+ // reservations in the innerscopes.
+ UNIMPLEMENTED();
+ return Register(-1);
+ }
+ }
+
+ void PrepareForConsecutiveAllocations(size_t count) {
+ allocator_.PrepareForConsecutiveAllocations(count);
+ }
+
+ Register NextConsecutiveRegister() {
+ return allocator_.NextConsecutiveRegister();
+ }
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const {
+ return allocator_.RegisterIsAllocatedInThisScope(reg);
+ }
+
+ RegisterAllocationScope* outer() const { return outer_; }
+
+ private:
+ BytecodeGenerator* generator() const { return generator_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+ BytecodeGenerator* generator_;
+ RegisterAllocationScope* outer_;
+ BytecodeRegisterAllocator allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
+};
+
+
// Scoped base class for determining where the result of an expression
// is stored.
class BytecodeGenerator::ExpressionResultScope {
@@ -185,7 +243,7 @@ class BytecodeGenerator::ExpressionResultScope {
: generator_(generator),
kind_(kind),
outer_(generator->execution_result()),
- allocator_(builder()),
+ allocator_(generator),
result_identified_(false) {
generator_->set_execution_result(this);
}
@@ -201,21 +259,11 @@ class BytecodeGenerator::ExpressionResultScope {
virtual void SetResultInAccumulator() = 0;
virtual void SetResultInRegister(Register reg) = 0;
- BytecodeGenerator* generator() const { return generator_; }
- BytecodeArrayBuilder* builder() const { return generator()->builder(); }
+ protected:
ExpressionResultScope* outer() const { return outer_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+ const RegisterAllocationScope* allocator() const { return &allocator_; }
- Register NewRegister() { return allocator_.NewRegister(); }
-
- void PrepareForConsecutiveAllocations(size_t count) {
- allocator_.PrepareForConsecutiveAllocations(count);
- }
-
- Register NextConsecutiveRegister() {
- return allocator_.NextConsecutiveRegister();
- }
-
- protected:
void set_result_identified() {
DCHECK(!result_identified());
result_identified_ = true;
@@ -223,13 +271,11 @@ class BytecodeGenerator::ExpressionResultScope {
bool result_identified() const { return result_identified_; }
- const TemporaryRegisterScope* allocator() const { return &allocator_; }
-
private:
BytecodeGenerator* generator_;
Expression::Context kind_;
ExpressionResultScope* outer_;
- TemporaryRegisterScope allocator_;
+ RegisterAllocationScope allocator_;
bool result_identified_;
DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
@@ -277,7 +323,7 @@ class BytecodeGenerator::RegisterResultScope final
: ExpressionResultScope(generator, Expression::kValue) {}
virtual void SetResultInAccumulator() {
- result_register_ = outer()->NewRegister();
+ result_register_ = allocator()->outer()->NewRegister();
builder()->StoreAccumulatorInRegister(result_register_);
set_result_identified();
}
@@ -307,15 +353,11 @@ BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- binary_expression_depth_(0),
- binary_expression_hazard_set_(zone) {
+ register_allocator_(nullptr) {
InitializeAstVisitor(isolate);
}
-BytecodeGenerator::~BytecodeGenerator() {}
-
-
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
set_info(info);
set_scope(info->scope());
@@ -348,6 +390,12 @@ void BytecodeGenerator::MakeBytecodeBody() {
// Build the arguments object if it is used.
VisitArgumentsObject(scope()->arguments());
+ // TODO(mythria): Build rest arguments array if it is used.
+ int rest_index;
+ if (scope()->rest_parameter(&rest_index)) {
+ UNIMPLEMENTED();
+ }
+
// Build assignment to {.this_function} variable if it is used.
VisitThisFunctionVariable(scope()->this_function_var());
@@ -374,6 +422,9 @@ void BytecodeGenerator::MakeBytecodeBody() {
void BytecodeGenerator::VisitBlock(Block* stmt) {
+ BlockBuilder block_builder(this->builder());
+ ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+
if (stmt->scope() == NULL) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
@@ -389,6 +440,7 @@ void BytecodeGenerator::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
}
+ if (stmt->labels() != nullptr) block_builder.EndBlock();
}
@@ -479,6 +531,7 @@ void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
void BytecodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
+ RegisterAllocationScope register_scope(this);
DCHECK(globals()->empty());
AstVisitor::VisitDeclarations(declarations);
if (globals()->empty()) return;
@@ -490,12 +543,11 @@ void BytecodeGenerator::VisitDeclarations(
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
DeclareGlobalsLanguageMode::encode(language_mode());
- TemporaryRegisterScope temporary_register_scope(builder());
- Register pairs = temporary_register_scope.NewRegister();
+ Register pairs = register_allocator()->NewRegister();
builder()->LoadLiteral(data);
builder()->StoreAccumulatorInRegister(pairs);
- Register flags = temporary_register_scope.NewRegister();
+ Register flags = register_allocator()->NewRegister();
builder()->LoadLiteral(Smi::FromInt(encoded_flags));
builder()->StoreAccumulatorInRegister(flags);
DCHECK(flags.index() == pairs.index() + 1);
@@ -505,9 +557,18 @@ void BytecodeGenerator::VisitDeclarations(
}
+void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ // Allocate an outer register allocations scope for the statement.
+ RegisterAllocationScope allocation_scope(this);
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
+ }
+}
+
+
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- // TODO(rmcilroy): Replace this with a StatementResultScope when it exists.
- EffectResultScope effect_scope(this);
VisitForEffect(stmt->expression());
}
@@ -519,14 +580,17 @@ void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
BytecodeLabel else_label, end_label;
if (stmt->condition()->ToBooleanIsTrue()) {
- // Generate only then block.
+ // Generate then block unconditionally as always true.
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
- // Generate only else block if it exists.
+ // Generate else block unconditionally if it exists.
if (stmt->HasElseStatement()) {
Visit(stmt->else_statement());
}
} else {
+ // TODO(oth): If then statement is BreakStatement or
+ // ContinueStatement we can reduce number of generated
+ // jump/jump_ifs here. See BasicLoops test.
VisitForAccumulatorValue(stmt->condition());
builder()->JumpIfFalse(&else_label);
Visit(stmt->then_statement());
@@ -559,7 +623,6 @@ void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- EffectResultScope effect_scope(this);
VisitForAccumulatorValue(stmt->expression());
builder()->Return();
}
@@ -571,9 +634,11 @@ void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ // We need this scope because we visit for register values. We have to
+ // maintain a execution result scope where registers can be allocated.
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchBuilder switch_builder(builder(), clauses->length());
- ControlScopeForSwitch scope(this, stmt, &switch_builder);
+ ControlScopeForBreakable scope(this, stmt, &switch_builder);
int default_index = -1;
// Keep the switch value in a register until a case matches.
@@ -627,96 +692,70 @@ void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
ControlScopeForIteration execution_control(this, stmt, &loop_builder);
- BytecodeLabel body_label, condition_label, done_label;
-
+ loop_builder.LoopHeader();
if (stmt->cond()->ToBooleanIsFalse()) {
Visit(stmt->body());
- // Bind condition_label and done_label for processing continue and break.
- builder()->Bind(&condition_label);
- builder()->Bind(&done_label);
+ loop_builder.Condition();
+ } else if (stmt->cond()->ToBooleanIsTrue()) {
+ loop_builder.Condition();
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
} else {
- builder()->Bind(&body_label);
Visit(stmt->body());
-
- builder()->Bind(&condition_label);
- if (stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&body_label);
- } else {
- VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
- }
- builder()->Bind(&done_label);
+ loop_builder.Condition();
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.JumpToHeaderIfTrue();
}
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(condition_label);
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
- BytecodeLabel body_label, condition_label, done_label;
if (stmt->cond()->ToBooleanIsFalse()) {
- // If the condition is false there is no need to generating the loop.
+ // If the condition is false there is no need to generate the loop.
return;
}
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
if (!stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&condition_label);
- }
- builder()->Bind(&body_label);
- Visit(stmt->body());
-
- builder()->Bind(&condition_label);
- if (stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&body_label);
- } else {
VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
+ loop_builder.BreakIfFalse();
}
- builder()->Bind(&done_label);
-
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(condition_label);
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
if (stmt->init() != nullptr) {
Visit(stmt->init());
}
-
if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
// If the condition is known to be false there is no need to generate
// body, next or condition blocks. Init block should be generated.
return;
}
- BytecodeLabel body_label, condition_label, next_label, done_label;
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&condition_label);
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.BreakIfFalse();
}
- builder()->Bind(&body_label);
Visit(stmt->body());
- builder()->Bind(&next_label);
if (stmt->next() != nullptr) {
+ loop_builder.Next();
Visit(stmt->next());
}
- if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
- builder()->Bind(&condition_label);
- VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
- } else {
- builder()->Jump(&body_label);
- }
- builder()->Bind(&done_label);
-
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(next_label);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
@@ -735,19 +774,19 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register value = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Register object = VisitForRegisterValue(property->obj());
- size_t name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
- builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadAccumulatorWithRegister(value);
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
break;
}
case KEYED_PROPERTY: {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register value = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Register object = VisitForRegisterValue(property->obj());
Register key = VisitForRegisterValue(property->key());
@@ -764,12 +803,6 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- // TODO(oth): For now we need a parent scope for paths that end up
- // in VisitLiteral which can allocate in the parent scope. A future
- // CL in preparation will add a StatementResultScope that will
- // remove the need for this EffectResultScope.
- EffectResultScope result_scope(this);
-
if (stmt->subject()->IsNullLiteral() ||
stmt->subject()->IsUndefinedLiteral(isolate())) {
// ForIn generates lots of code, skip if it wouldn't produce any effects.
@@ -778,58 +811,43 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
LoopBuilder loop_builder(builder());
ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+ BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
// Prepare the state for executing ForIn.
VisitForAccumulatorValue(stmt->subject());
- loop_builder.BreakIfUndefined();
- loop_builder.BreakIfNull();
-
- Register receiver = execution_result()->NewRegister();
+ builder()->JumpIfUndefined(&subject_undefined_label);
+ builder()->JumpIfNull(&subject_null_label);
+ Register receiver = register_allocator()->NewRegister();
builder()->CastAccumulatorToJSObject();
+ builder()->JumpIfNull(&not_object_label);
builder()->StoreAccumulatorInRegister(receiver);
- builder()->CallRuntime(Runtime::kGetPropertyNamesFast, receiver, 1);
- builder()->ForInPrepare(receiver);
- loop_builder.BreakIfUndefined();
+ Register cache_type = register_allocator()->NewRegister();
+ Register cache_array = register_allocator()->NewRegister();
+ Register cache_length = register_allocator()->NewRegister();
+ builder()->ForInPrepare(cache_type, cache_array, cache_length);
- Register for_in_state = execution_result()->NewRegister();
- builder()->StoreAccumulatorInRegister(for_in_state);
-
- // The loop.
- BytecodeLabel condition_label, break_label, continue_label;
- Register index = receiver; // Re-using register as receiver no longer used.
+ // Set up loop counter
+ Register index = register_allocator()->NewRegister();
builder()->LoadLiteral(Smi::FromInt(0));
+ builder()->StoreAccumulatorInRegister(index);
- // Check loop termination (accumulator holds index).
- builder()
- ->Bind(&condition_label)
- .StoreAccumulatorInRegister(index)
- .ForInDone(for_in_state);
+ // The loop
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
+ builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
-
- // Get the next item.
- builder()->ForInNext(for_in_state, index);
-
- // Start again if the item, currently in the accumulator, is undefined.
+ builder()->ForInNext(receiver, cache_type, cache_array, index);
loop_builder.ContinueIfUndefined();
-
- // Store the value in the each variable.
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
- // NB the user's loop variable will be assigned the value of each so
- // even an empty body will have this assignment.
Visit(stmt->body());
-
- // Increment the index and start loop again.
- builder()
- ->Bind(&continue_label)
- .LoadAccumulatorWithRegister(index)
- .CountOperation(Token::Value::ADD, language_mode_strength())
- .Jump(&condition_label);
-
- // End of the loop.
- builder()->Bind(&break_label);
-
- loop_builder.SetBreakTarget(break_label);
- loop_builder.SetContinueTarget(continue_label);
+ loop_builder.Next();
+ builder()->ForInStep(index);
+ builder()->StoreAccumulatorInRegister(index);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
+ builder()->Bind(&not_object_label);
+ builder()->Bind(&subject_null_label);
+ builder()->Bind(&subject_undefined_label);
}
@@ -867,10 +885,8 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
CHECK(!shared_info.is_null()); // TODO(rmcilroy): Set stack overflow?
-
- builder()
- ->LoadLiteral(shared_info)
- .CreateClosure(expr->pretenure() ? TENURED : NOT_TENURED);
+ builder()->CreateClosure(shared_info,
+ expr->pretenure() ? TENURED : NOT_TENURED);
execution_result()->SetResultInAccumulator();
}
@@ -937,24 +953,17 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
- TemporaryRegisterScope temporary_register_scope(builder());
- Register flags = temporary_register_scope.NewRegister();
- builder()
- ->LoadLiteral(expr->flags())
- .StoreAccumulatorInRegister(flags)
- .LoadLiteral(expr->pattern())
- .CreateRegExpLiteral(expr->literal_index(), flags);
+ builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
+ expr->flags());
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Deep-copy the literal boilerplate.
- builder()
- ->LoadLiteral(expr->constant_properties())
- .CreateObjectLiteral(expr->literal_index(), expr->ComputeFlags(true));
-
- TemporaryRegisterScope temporary_register_scope(builder());
+ builder()->CreateObjectLiteral(expr->constant_properties(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
Register literal;
// Store computed values into the literal.
@@ -962,17 +971,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
int property_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
- TemporaryRegisterScope inner_temporary_register_scope(builder());
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
if (literal_in_accumulator) {
- literal = temporary_register_scope.NewRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
+ RegisterAllocationScope inner_register_scope(this);
Literal* literal_key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -985,26 +994,21 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// contains computed properties with an uninitialized value.
if (literal_key->value()->IsInternalizedString()) {
if (property->emit_store()) {
- size_t name_index =
- builder()->GetConstantPoolEntry(literal_key->AsPropertyName());
VisitForAccumulatorValue(property->value());
- builder()->StoreNamedProperty(literal, name_index,
- feedback_index(property->GetSlot(0)),
- language_mode());
+ builder()->StoreNamedProperty(
+ literal, literal_key->AsPropertyName(),
+ feedback_index(property->GetSlot(0)), language_mode());
} else {
VisitForEffect(property->value());
}
} else {
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
- Register key =
- inner_temporary_register_scope.NextConsecutiveRegister();
- Register value =
- inner_temporary_register_scope.NextConsecutiveRegister();
- Register language =
- inner_temporary_register_scope.NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
// TODO(oth): This is problematic - can't assume contiguous here.
- // literal is allocated in temporary_register_scope, whereas
- // key, value, language are in another.
+ // literal is allocated in outer register scope, whereas key, value,
+ // language are in another.
DCHECK(Register::AreContiguous(literal, key, value, language));
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key);
@@ -1021,10 +1025,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(1);
+ register_allocator()->PrepareForConsecutiveAllocations(1);
DCHECK(property->emit_store());
- Register value =
- inner_temporary_register_scope.NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, value));
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
@@ -1048,12 +1051,12 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(4);
- Register name = inner_temporary_register_scope.NextConsecutiveRegister();
- Register getter = inner_temporary_register_scope.NextConsecutiveRegister();
- Register setter = inner_temporary_register_scope.NextConsecutiveRegister();
- Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register getter = register_allocator()->NextConsecutiveRegister();
+ Register setter = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
VisitForAccumulatorValue(it->first);
builder()->StoreAccumulatorInRegister(name);
@@ -1075,19 +1078,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// compile them into a series of "SetOwnProperty" runtime calls. This will
// preserve insertion order.
for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
if (literal_in_accumulator) {
- temporary_register_scope.PrepareForConsecutiveAllocations(4);
- literal = temporary_register_scope.NextConsecutiveRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ RegisterAllocationScope inner_register_scope(this);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(property->emit_store());
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- Register value = inner_temporary_register_scope.NewRegister();
+ Register value = register_allocator()->NewRegister();
DCHECK(Register::AreContiguous(literal, value));
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
@@ -1095,11 +1096,10 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
continue;
}
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
- Register key = inner_temporary_register_scope.NextConsecutiveRegister();
- Register value = inner_temporary_register_scope.NextConsecutiveRegister();
- Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, key, value, attr));
VisitForAccumulatorValue(property->key());
@@ -1144,11 +1144,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
- builder()
- ->LoadLiteral(expr->constant_elements())
- .CreateArrayLiteral(expr->literal_index(), expr->ComputeFlags(true));
-
- TemporaryRegisterScope temporary_register_scope(builder());
+ builder()->CreateArrayLiteral(expr->constant_elements(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
Register index, literal;
// Evaluate all the non-constant subexpressions and store them into the
@@ -1164,8 +1162,8 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (literal_in_accumulator) {
- index = temporary_register_scope.NewRegister();
- literal = temporary_register_scope.NewRegister();
+ index = register_allocator()->NewRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
@@ -1198,21 +1196,22 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(Register(variable->index()));
- execution_result()->SetResultInRegister(source);
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register source = builder()->Parameter(variable->index() + 1);
- execution_result()->SetResultInRegister(source);
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- size_t name_index = builder()->GetConstantPoolEntry(variable->name());
- builder()->LoadGlobal(name_index, feedback_index(slot), language_mode(),
- typeof_mode);
+ builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ language_mode(), typeof_mode);
execution_result()->SetResultInAccumulator();
break;
}
@@ -1223,10 +1222,12 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
if (context) {
context_reg = context->reg();
} else {
- context_reg = execution_result()->NewRegister();
+ context_reg = register_allocator()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
builder()
->LoadAccumulatorWithRegister(execution_context()->reg())
.StoreAccumulatorInRegister(context_reg);
@@ -1242,8 +1243,11 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// let variables.
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ execution_result()->SetResultInAccumulator();
+ break;
+ }
}
}
@@ -1270,7 +1274,6 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// TODO(rmcilroy): support const mode initialization.
Register destination(variable->index());
builder()->StoreAccumulatorInRegister(destination);
- RecordStoreToRegister(destination);
break;
}
case VariableLocation::PARAMETER: {
@@ -1278,13 +1281,12 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register destination(builder()->Parameter(variable->index() + 1));
builder()->StoreAccumulatorInRegister(destination);
- RecordStoreToRegister(destination);
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- size_t name_index = builder()->GetConstantPoolEntry(variable->name());
- builder()->StoreGlobal(name_index, feedback_index(slot), language_mode());
+ builder()->StoreGlobal(variable->name(), feedback_index(slot),
+ language_mode());
break;
}
case VariableLocation::CONTEXT: {
@@ -1295,11 +1297,13 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
if (context) {
context_reg = context->reg();
} else {
- Register value_temp = execution_result()->NewRegister();
- context_reg = execution_result()->NewRegister();
+ Register value_temp = register_allocator()->NewRegister();
+ context_reg = register_allocator()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
builder()
->StoreAccumulatorInRegister(value_temp)
.LoadAccumulatorWithRegister(execution_context()->reg())
@@ -1314,8 +1318,10 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ builder()->StoreLookupSlot(variable->name(), language_mode());
+ break;
+ }
}
}
@@ -1323,7 +1329,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Register object, key;
- size_t name_index = kMaxUInt32;
+ Handle<String> name;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -1336,8 +1342,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY: {
object = VisitForRegisterValue(property->obj());
- name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
+ name = property->key()->AsLiteral()->AsPropertyName();
break;
}
case KEYED_PROPERTY: {
@@ -1345,7 +1350,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// Use VisitForAccumulator and store to register so that the key is
// still in the accumulator for loading the old value below.
- key = execution_result()->NewRegister();
+ key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key);
} else {
@@ -1371,9 +1376,9 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = execution_result()->NewRegister();
+ old_value = register_allocator()->NewRegister();
builder()
- ->LoadNamedProperty(object, name_index, feedback_index(slot),
+ ->LoadNamedProperty(object, name, feedback_index(slot),
language_mode())
.StoreAccumulatorInRegister(old_value);
break;
@@ -1382,7 +1387,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Key is already in accumulator at this point due to evaluating the
// LHS above.
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = execution_result()->NewRegister();
+ old_value = register_allocator()->NewRegister();
builder()
->LoadKeyedProperty(object, feedback_index(slot), language_mode())
.StoreAccumulatorInRegister(old_value);
@@ -1411,7 +1416,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY:
- builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
break;
case KEYED_PROPERTY:
@@ -1442,10 +1447,9 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
- size_t name_index = builder()->GetConstantPoolEntry(
- expr->key()->AsLiteral()->AsPropertyName());
- builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
- language_mode());
+ builder()->LoadNamedProperty(obj,
+ expr->key()->AsLiteral()->AsPropertyName(),
+ feedback_index(slot), language_mode());
break;
}
case KEYED_PROPERTY: {
@@ -1489,16 +1493,16 @@ Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
// less calls to NextConsecutiveRegister(). Otherwise, the arguments
// here will be consecutive, but they will not be consecutive with
// earlier consecutive allocations made by the caller.
- execution_result()->PrepareForConsecutiveAllocations(args->length());
+ register_allocator()->PrepareForConsecutiveAllocations(args->length());
// Visit for first argument that goes into returned register
- Register first_arg = execution_result()->NextConsecutiveRegister();
+ Register first_arg = register_allocator()->NextConsecutiveRegister();
VisitForAccumulatorValue(args->at(0));
builder()->StoreAccumulatorInRegister(first_arg);
// Visit remaining arguments
for (int i = 1; i < static_cast<int>(args->length()); i++) {
- Register ith_arg = execution_result()->NextConsecutiveRegister();
+ Register ith_arg = register_allocator()->NextConsecutiveRegister();
VisitForAccumulatorValue(args->at(i));
builder()->StoreAccumulatorInRegister(ith_arg);
DCHECK(ith_arg.index() - i == first_arg.index());
@@ -1513,14 +1517,15 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- Register callee = execution_result()->NewRegister();
// The receiver and arguments need to be allocated consecutively for
- // Call(). Future optimizations could avoid this there are no
+ // Call(). We allocate the callee and receiver consecutively for calls to
+ // kLoadLookupSlot. Future optimizations could avoid this there are no
// arguments or the receiver and arguments are already consecutive.
ZoneList<Expression*>* args = expr->arguments();
- execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
- Register receiver = execution_result()->NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
+ Register callee = register_allocator()->NextConsecutiveRegister();
+ Register receiver = register_allocator()->NextConsecutiveRegister();
switch (call_type) {
case Call::NAMED_PROPERTY_CALL:
@@ -1542,6 +1547,27 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->StoreAccumulatorInRegister(callee);
break;
}
+ case Call::LOOKUP_SLOT_CALL:
+ case Call::POSSIBLY_EVAL_CALL: {
+ if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register context = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NextConsecutiveRegister();
+
+ // Call LoadLookupSlot to get the callee and receiver.
+ DCHECK(Register::AreContiguous(callee, receiver));
+ Variable* variable = callee_expr->AsVariableProxy()->var();
+ builder()
+ ->MoveRegister(Register::function_context(), context)
+ .LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+ break;
+ }
+ // Fall through.
+ DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+ }
case Call::OTHER_CALL: {
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
VisitForAccumulatorValue(callee_expr);
@@ -1550,9 +1576,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::NAMED_SUPER_PROPERTY_CALL:
case Call::KEYED_SUPER_PROPERTY_CALL:
- case Call::LOOKUP_SLOT_CALL:
case Call::SUPER_CALL:
- case Call::POSSIBLY_EVAL_CALL:
UNIMPLEMENTED();
}
@@ -1561,15 +1585,45 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Register arg = VisitArguments(args);
CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
- // TODO(rmcilroy): Deal with possible direct eval here?
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value.
+ if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
+ Register source = register_allocator()->NextConsecutiveRegister();
+ Register function = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
+ Register position = register_allocator()->NextConsecutiveRegister();
+
+ // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
+ // strings and function closure, and loading language and
+ // position.
+ builder()
+ ->MoveRegister(callee, callee_for_eval)
+ .MoveRegister(arg, source)
+ .MoveRegister(Register::function_closure(), function)
+ .LoadLiteral(Smi::FromInt(language_mode()))
+ .StoreAccumulatorInRegister(language)
+ .LoadLiteral(
+ Smi::FromInt(execution_context()->scope()->start_position()))
+ .StoreAccumulatorInRegister(position);
+
+ // Call ResolvePossiblyDirectEval and modify the callee.
+ builder()
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+ .StoreAccumulatorInRegister(callee);
+ }
+
// TODO(rmcilroy): Use CallIC to allow call type feedback.
- builder()->Call(callee, receiver, args->length());
+ builder()->Call(callee, receiver, args->length(),
+ feedback_index(expr->CallFeedbackICSlot()));
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
- Register constructor = execution_result()->NewRegister();
+ Register constructor = register_allocator()->NewRegister();
VisitForAccumulatorValue(expr->expression());
builder()->StoreAccumulatorInRegister(constructor);
@@ -1585,8 +1639,8 @@ void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Register receiver;
if (expr->is_jsruntime()) {
// Allocate a register for the receiver and load it with undefined.
- execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
- receiver = execution_result()->NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
+ receiver = register_allocator()->NextConsecutiveRegister();
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
}
// Evaluate all arguments to the runtime call.
@@ -1596,8 +1650,6 @@ void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
} else {
- // TODO(rmcilroy): support multiple return values.
- DCHECK_LE(expr->function()->result_size, 1);
Runtime::FunctionId function_id = expr->function()->function_id;
builder()->CallRuntime(function_id, first_arg, args->length());
}
@@ -1678,10 +1730,13 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, let, const or variables not explicitly declared.
- Register global_object = execution_result()->NewRegister();
+ Register native_context = register_allocator()->NewRegister();
+ Register global_object = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::GLOBAL_OBJECT_INDEX)
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
.StoreAccumulatorInRegister(global_object)
.LoadLiteral(variable->name())
.Delete(global_object, language_mode());
@@ -1700,7 +1755,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
break;
}
case VariableLocation::LOOKUP: {
- UNIMPLEMENTED();
+ builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
break;
}
default:
@@ -1727,7 +1782,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate LHS expression and get old value.
Register obj, key, old_value;
- size_t name_index = kMaxUInt32;
+ Handle<String> name;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -1738,9 +1793,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
obj = VisitForRegisterValue(property->obj());
- name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
- builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
+ name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadNamedProperty(obj, name, feedback_index(slot),
language_mode());
break;
}
@@ -1749,7 +1803,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
obj = VisitForRegisterValue(property->obj());
// Use visit for accumulator here since we need the key in the accumulator
// for the LoadKeyedProperty.
- key = execution_result()->NewRegister();
+ key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
obj, feedback_index(slot), language_mode());
@@ -1767,7 +1821,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
if (is_postfix) {
- old_value = execution_result()->outer()->NewRegister();
+ old_value = register_allocator()->outer()->NewRegister();
builder()->StoreAccumulatorInRegister(old_value);
}
@@ -1783,8 +1837,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case NAMED_PROPERTY: {
- builder()->StoreNamedProperty(
- obj, name_index, feedback_index(feedback_slot), language_mode());
+ builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+ language_mode());
break;
}
case KEYED_PROPERTY: {
@@ -1825,37 +1879,17 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
- // once we have StatementScope that tracks hazardous loads/stores.
- PrepareForBinaryExpression();
Register lhs = VisitForRegisterValue(expr->left());
- if (builder()->RegisterIsParameterOrLocal(lhs)) {
- // Result was returned in an existing local or parameter. See if
- // it needs to be moved to a temporary.
- // TODO(oth) LoadFromAliasedRegister call into VisitVariableLoad().
- lhs = LoadFromAliasedRegister(lhs);
- }
VisitForAccumulatorValue(expr->right());
builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
- CompleteBinaryExpression();
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
- // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
- // once we have StatementScope that tracks hazardous loads/stores.
- PrepareForBinaryExpression();
Register lhs = VisitForRegisterValue(expr->left());
- if (builder()->RegisterIsParameterOrLocal(lhs)) {
- // Result was returned in an existing local or parameter. See if
- // it needs to be moved to a temporary.
- // TODO(oth) LoadFromAliasedRegister call into VisitVariableLoad().
- lhs = LoadFromAliasedRegister(lhs);
- }
VisitForAccumulatorValue(expr->right());
builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
- CompleteBinaryExpression();
execution_result()->SetResultInAccumulator();
}
@@ -1928,15 +1962,21 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
}
+void BytecodeGenerator::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void BytecodeGenerator::VisitNewLocalFunctionContext() {
AccumulatorResultScope accumulator_execution_result(this);
Scope* scope = this->scope();
// Allocate a new local context.
if (scope->is_script_scope()) {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register closure = temporary_register_scope.NewRegister();
- Register scope_info = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register closure = register_allocator()->NewRegister();
+ Register scope_info = register_allocator()->NewRegister();
DCHECK(Register::AreContiguous(closure, scope_info));
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
@@ -1956,7 +1996,12 @@ void BytecodeGenerator::VisitBuildLocalActivationContext() {
Scope* scope = this->scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
- UNIMPLEMENTED();
+ Variable* variable = scope->receiver();
+ Register receiver(builder()->Parameter(0));
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
+ execution_context()->reg(), variable->index());
}
// Copy parameters into context if necessary.
@@ -1981,10 +2026,10 @@ void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
DCHECK(scope->is_block_scope());
// Allocate a new local block context.
- TemporaryRegisterScope temporary_register_scope(builder());
- Register scope_info = temporary_register_scope.NewRegister();
- Register closure = temporary_register_scope.NewRegister();
- DCHECK(Register::AreContiguous(scope_info, closure));
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register scope_info = register_allocator()->NextConsecutiveRegister();
+ Register closure = register_allocator()->NextConsecutiveRegister();
+
builder()
->LoadLiteral(scope->GetScopeInfo(isolate()))
.StoreAccumulatorInRegister(scope_info);
@@ -2041,7 +2086,7 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// TODO(rmcilroy): Remove once we have tests which exercise this code path.
UNIMPLEMENTED();
- // Store the closure we were called with in the this_function_var.
+ // Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
}
@@ -2050,8 +2095,8 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
if (variable == nullptr) return;
- // Store the closure we were called with in the this_function_var.
- builder()->CallRuntime(Runtime::kGetOriginalConstructor, Register(), 0);
+ // Store the new target we were called with in the given variable.
+ builder()->LoadAccumulatorWithRegister(Register::new_target());
VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
}
@@ -2063,8 +2108,12 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
- // Pass a SMI sentinel and let the runtime look up the empty function.
- builder()->LoadLiteral(Smi::FromInt(0));
+ Register native_context = register_allocator()->NewRegister();
+ builder()
+ ->LoadContextSlot(execution_context()->reg(),
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
} else {
DCHECK(closure_scope->is_function_scope());
builder()->LoadAccumulatorWithRegister(Register::function_closure());
@@ -2073,13 +2122,6 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
}
-void BytecodeGenerator::PrepareForBinaryExpression() {
- if (binary_expression_depth_++ == 0) {
- binary_expression_hazard_set_.clear();
- }
-}
-
-
// Visits the expression |expr| and places the result in the accumulator.
void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
AccumulatorResultScope accumulator_scope(this);
@@ -2103,35 +2145,6 @@ Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
}
-Register BytecodeGenerator::LoadFromAliasedRegister(Register reg) {
- // TODO(oth): Follow on CL to load from re-map here.
- DCHECK(builder()->RegisterIsParameterOrLocal(reg));
- if (binary_expression_depth_ > 0) {
- binary_expression_hazard_set_.insert(reg.index());
- }
- return reg;
-}
-
-
-void BytecodeGenerator::RecordStoreToRegister(Register reg) {
- DCHECK(builder()->RegisterIsParameterOrLocal(reg));
- if (binary_expression_depth_ > 0) {
- // TODO(oth): a store to a register that's be loaded needs to be
- // remapped.
- DCHECK(binary_expression_hazard_set_.find(reg.index()) ==
- binary_expression_hazard_set_.end());
- }
-}
-
-
-void BytecodeGenerator::CompleteBinaryExpression() {
- DCHECK(binary_expression_depth_ > 0);
- binary_expression_depth_ -= 1;
- // TODO(oth): spill remapped registers into origins.
- // TODO(oth): make statement/top-level.
-}
-
-
Register BytecodeGenerator::NextContextRegister() const {
if (execution_context() == nullptr) {
// Return the incoming function context for the outermost execution context.
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 7284cfe9e1..8bda7be301 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_GENERATOR_H_
#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecodes.h"
@@ -13,10 +13,9 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeGenerator : public AstVisitor {
+class BytecodeGenerator final : public AstVisitor {
public:
BytecodeGenerator(Isolate* isolate, Zone* zone);
- virtual ~BytecodeGenerator();
Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
@@ -24,18 +23,20 @@ class BytecodeGenerator : public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- // Visiting function for declarations list is overridden.
+ // Visiting function for declarations list and statements are overridden.
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statments) override;
private:
class ContextScope;
class ControlScope;
+ class ControlScopeForBreakable;
class ControlScopeForIteration;
- class ControlScopeForSwitch;
class ExpressionResultScope;
class EffectResultScope;
class AccumulatorResultScope;
class RegisterResultScope;
+ class RegisterAllocationScope;
void MakeBytecodeBody();
Register NextContextRegister() const;
@@ -54,6 +55,9 @@ class BytecodeGenerator : public AstVisitor {
void VisitNot(UnaryOperation* expr);
void VisitDelete(UnaryOperation* expr);
+ // Used by flow control routines to evaluate loop condition.
+ void VisitCondition(Expression* expr);
+
// Helper visitors which perform common operations.
Register VisitArguments(ZoneList<Expression*>* arguments);
@@ -84,17 +88,12 @@ class BytecodeGenerator : public AstVisitor {
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
-
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect.
void VisitForAccumulatorValue(Expression* expression);
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
void VisitForEffect(Expression* node);
- // Methods marking the start and end of binary expressions.
- void PrepareForBinaryExpression();
- void CompleteBinaryExpression();
-
// Methods for tracking and remapping register.
void RecordStoreToRegister(Register reg);
Register LoadFromAliasedRegister(Register reg);
@@ -121,6 +120,13 @@ class BytecodeGenerator : public AstVisitor {
execution_result_ = execution_result;
}
ExpressionResultScope* execution_result() const { return execution_result_; }
+ inline void set_register_allocator(
+ RegisterAllocationScope* register_allocator) {
+ register_allocator_ = register_allocator;
+ }
+ RegisterAllocationScope* register_allocator() const {
+ return register_allocator_;
+ }
ZoneVector<Handle<Object>>* globals() { return &globals_; }
inline LanguageMode language_mode() const;
@@ -136,9 +142,7 @@ class BytecodeGenerator : public AstVisitor {
ControlScope* execution_control_;
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
-
- int binary_expression_depth_;
- ZoneSet<int> binary_expression_hazard_set_;
+ RegisterAllocationScope* register_allocator_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
new file mode 100644
index 0000000000..4efb612db5
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-allocator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeRegisterAllocator::BytecodeRegisterAllocator(
+ BytecodeArrayBuilder* builder)
+ : builder_(builder),
+ allocated_(builder->zone()),
+ next_consecutive_register_(-1),
+ next_consecutive_count_(-1) {}
+
+
+BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
+ for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
+ builder_->ReturnTemporaryRegister(*i);
+ }
+ allocated_.clear();
+}
+
+
+Register BytecodeRegisterAllocator::NewRegister() {
+ int allocated = -1;
+ if (next_consecutive_count_ <= 0) {
+ allocated = builder_->BorrowTemporaryRegister();
+ } else {
+ allocated = builder_->BorrowTemporaryRegisterNotInRange(
+ next_consecutive_register_,
+ next_consecutive_register_ + next_consecutive_count_ - 1);
+ }
+ allocated_.push_back(allocated);
+ return Register(allocated);
+}
+
+
+bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
+ Register reg) const {
+ for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
+ if (*i == reg.index()) return true;
+ }
+ return false;
+}
+
+
+void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
+ if (static_cast<int>(count) > next_consecutive_count_) {
+ next_consecutive_register_ =
+ builder_->PrepareForConsecutiveTemporaryRegisters(count);
+ next_consecutive_count_ = static_cast<int>(count);
+ }
+}
+
+
+Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
+ DCHECK_GE(next_consecutive_register_, 0);
+ DCHECK_GT(next_consecutive_count_, 0);
+ builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+ allocated_.push_back(next_consecutive_register_);
+ next_consecutive_count_--;
+ return Register(next_consecutive_register_++);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
new file mode 100644
index 0000000000..74ab3a4272
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilder;
+class Register;
+
+// A class than allows the instantiator to allocate temporary registers that are
+// cleaned up when scope is closed.
+class BytecodeRegisterAllocator {
+ public:
+ explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+ ~BytecodeRegisterAllocator();
+ Register NewRegister();
+
+ void PrepareForConsecutiveAllocations(size_t count);
+ Register NextConsecutiveRegister();
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const;
+
+ bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ BytecodeArrayBuilder* builder_;
+ ZoneVector<int> allocated_;
+ int next_consecutive_register_;
+ int next_consecutive_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index df2a1dd4f1..2d4406cc1b 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -159,8 +159,8 @@ OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
// static
-bool Bytecodes::IsJump(Bytecode bytecode) {
- return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpIfTrue ||
+bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrue ||
bytecode == Bytecode::kJumpIfFalse ||
bytecode == Bytecode::kJumpIfToBooleanTrue ||
bytecode == Bytecode::kJumpIfToBooleanFalse ||
@@ -170,18 +170,69 @@ bool Bytecodes::IsJump(Bytecode bytecode) {
// static
-bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstant ||
- bytecode == Bytecode::kJumpIfTrueConstant ||
+bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstant ||
bytecode == Bytecode::kJumpIfFalseConstant ||
bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
- bytecode == Bytecode::kJumpIfNull ||
+ bytecode == Bytecode::kJumpIfNullConstant ||
bytecode == Bytecode::kJumpIfUndefinedConstant;
}
// static
+bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfNullConstantWide ||
+ bytecode == Bytecode::kJumpIfUndefinedConstantWide;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
+ return IsConditionalJumpImmediate(bytecode) ||
+ IsConditionalJumpConstant(bytecode) ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstant ||
+ IsConditionalJumpConstant(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstantWide ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJump(Bytecode bytecode) {
+ return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
+ IsJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn || IsJump(bytecode);
+}
+
+
+// static
std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
int parameter_count) {
Vector<char> buf = Vector<char>::New(50);
@@ -209,13 +260,15 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
case interpreter::OperandType::kCount8:
os << "#" << static_cast<unsigned int>(*operand_start);
break;
+ case interpreter::OperandType::kCount16:
+ os << '#' << ReadUnalignedUInt16(operand_start);
+ break;
case interpreter::OperandType::kIdx8:
os << "[" << static_cast<unsigned int>(*operand_start) << "]";
break;
- case interpreter::OperandType::kIdx16: {
+ case interpreter::OperandType::kIdx16:
os << "[" << ReadUnalignedUInt16(operand_start) << "]";
break;
- }
case interpreter::OperandType::kImm8:
os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
break;
@@ -226,6 +279,8 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
os << "<context>";
} else if (reg.is_function_closure()) {
os << "<closure>";
+ } else if (reg.is_new_target()) {
+ os << "<new.target>";
} else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count);
if (parameter_index == 0) {
@@ -238,6 +293,29 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
}
break;
}
+ case interpreter::OperandType::kRegPair8: {
+ Register reg = Register::FromOperand(*operand_start);
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1 << "-" << parameter_index;
+ } else {
+ os << "r" << reg.index() << "-" << reg.index() + 1;
+ }
+ break;
+ }
+ case interpreter::OperandType::kReg16: {
+ Register reg =
+ Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1;
+ } else {
+ os << "r" << reg.index();
+ }
+ break;
+ }
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -271,6 +349,8 @@ static const int kFunctionClosureRegisterIndex =
-InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
static const int kFunctionContextRegisterIndex =
-InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+static const int kNewTargetRegisterIndex =
+ -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
@@ -285,7 +365,7 @@ Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
DCHECK_LT(register_index, 0);
- DCHECK_GE(register_index, Register::kMinRegisterIndex);
+ DCHECK_GE(register_index, kMinInt8);
return Register(register_index);
}
@@ -316,10 +396,22 @@ bool Register::is_function_context() const {
}
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+
+bool Register::is_new_target() const {
+ return index() == kNewTargetRegisterIndex;
+}
+
+
int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-uint8_t Register::ToOperand() const { return static_cast<uint8_t>(-index_); }
+uint8_t Register::ToOperand() const {
+ DCHECK_GE(index_, kMinInt8);
+ DCHECK_LE(index_, kMaxInt8);
+ return static_cast<uint8_t>(-index_);
+}
Register Register::FromOperand(uint8_t operand) {
@@ -327,6 +419,18 @@ Register Register::FromOperand(uint8_t operand) {
}
+uint16_t Register::ToWideOperand() const {
+ DCHECK_GE(index_, kMinInt16);
+ DCHECK_LE(index_, kMaxInt16);
+ return static_cast<uint16_t>(-index_);
+}
+
+
+Register Register::FromWideOperand(uint16_t operand) {
+ return Register(-static_cast<int16_t>(operand));
+}
+
+
bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5) {
if (reg1.index() + 1 != reg2.index()) {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 8eaf920d1b..a9beb6c918 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -25,11 +25,14 @@ namespace interpreter {
V(Count8, OperandSize::kByte) \
V(Imm8, OperandSize::kByte) \
V(Idx8, OperandSize::kByte) \
- V(Reg8, OperandSize::kByte) \
V(MaybeReg8, OperandSize::kByte) \
+ V(Reg8, OperandSize::kByte) \
+ V(RegPair8, OperandSize::kByte) \
\
/* Short operands. */ \
- V(Idx16, OperandSize::kShort)
+ V(Count16, OperandSize::kShort) \
+ V(Idx16, OperandSize::kShort) \
+ V(Reg16, OperandSize::kShort)
// The list of bytecodes which are interpreted by the interpreter.
#define BYTECODE_LIST(V) \
@@ -64,11 +67,28 @@ namespace interpreter {
V(PopContext, OperandType::kReg8) \
V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ \
+ /* Load-Store lookup slots */ \
+ V(LdaLookupSlot, OperandType::kIdx8) \
+ V(LdaLookupSlotInsideTypeof, OperandType::kIdx8) \
+ V(LdaLookupSlotWide, OperandType::kIdx16) \
+ V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16) \
+ V(StaLookupSlotSloppy, OperandType::kIdx8) \
+ V(StaLookupSlotStrict, OperandType::kIdx8) \
+ V(StaLookupSlotSloppyWide, OperandType::kIdx16) \
+ V(StaLookupSlotStrictWide, OperandType::kIdx16) \
\
/* Register-accumulator transfers */ \
V(Ldar, OperandType::kReg8) \
V(Star, OperandType::kReg8) \
\
+ /* Register-register transfers */ \
+ V(Mov, OperandType::kReg8, OperandType::kReg8) \
+ V(Exchange, OperandType::kReg8, OperandType::kReg16) \
+ V(ExchangeWide, OperandType::kReg16, OperandType::kReg16) \
+ \
/* LoadIC operations */ \
V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
@@ -119,11 +139,17 @@ namespace interpreter {
V(TypeOf, OperandType::kNone) \
V(DeletePropertyStrict, OperandType::kReg8) \
V(DeletePropertySloppy, OperandType::kReg8) \
+ V(DeleteLookupSlot, OperandType::kNone) \
\
/* Call operations */ \
- V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8) \
+ V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8, \
+ OperandType::kIdx8) \
+ V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16, \
+ OperandType::kIdx16) \
V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
OperandType::kCount8) \
+ V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8, \
+ OperandType::kCount8, OperandType::kRegPair8) \
V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
OperandType::kCount8) \
\
@@ -143,18 +169,27 @@ namespace interpreter {
V(TestIn, OperandType::kReg8) \
\
/* Cast operators */ \
- V(ToBoolean, OperandType::kNone) \
V(ToName, OperandType::kNone) \
V(ToNumber, OperandType::kNone) \
V(ToObject, OperandType::kNone) \
\
/* Literals */ \
- V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kReg8) \
- V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kImm8) \
- V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
\
/* Closure allocation */ \
- V(CreateClosure, OperandType::kImm8) \
+ V(CreateClosure, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8) \
\
/* Arguments allocation */ \
V(CreateMappedArguments, OperandType::kNone) \
@@ -163,23 +198,32 @@ namespace interpreter {
/* Control Flow */ \
V(Jump, OperandType::kImm8) \
V(JumpConstant, OperandType::kIdx8) \
+ V(JumpConstantWide, OperandType::kIdx16) \
V(JumpIfTrue, OperandType::kImm8) \
V(JumpIfTrueConstant, OperandType::kIdx8) \
+ V(JumpIfTrueConstantWide, OperandType::kIdx16) \
V(JumpIfFalse, OperandType::kImm8) \
V(JumpIfFalseConstant, OperandType::kIdx8) \
+ V(JumpIfFalseConstantWide, OperandType::kIdx16) \
V(JumpIfToBooleanTrue, OperandType::kImm8) \
V(JumpIfToBooleanTrueConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16) \
V(JumpIfToBooleanFalse, OperandType::kImm8) \
V(JumpIfToBooleanFalseConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16) \
V(JumpIfNull, OperandType::kImm8) \
V(JumpIfNullConstant, OperandType::kIdx8) \
+ V(JumpIfNullConstantWide, OperandType::kIdx16) \
V(JumpIfUndefined, OperandType::kImm8) \
V(JumpIfUndefinedConstant, OperandType::kIdx8) \
+ V(JumpIfUndefinedConstantWide, OperandType::kIdx16) \
\
/* Complex flow control For..in */ \
- V(ForInPrepare, OperandType::kReg8) \
- V(ForInNext, OperandType::kReg8, OperandType::kReg8) \
- V(ForInDone, OperandType::kReg8) \
+ V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInDone, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kReg8) \
+ V(ForInStep, OperandType::kReg8) \
\
/* Non-local flow control */ \
V(Throw, OperandType::kNone) \
@@ -224,15 +268,9 @@ enum class Bytecode : uint8_t {
// in its stack-frame. Register hold parameters, this, and expression values.
class Register {
public:
- static const int kMaxRegisterIndex = 127;
- static const int kMinRegisterIndex = -128;
-
Register() : index_(kIllegalIndex) {}
- explicit Register(int index) : index_(index) {
- DCHECK_LE(index_, kMaxRegisterIndex);
- DCHECK_GE(index_, kMinRegisterIndex);
- }
+ explicit Register(int index) : index_(index) {}
int index() const {
DCHECK(index_ != kIllegalIndex);
@@ -253,9 +291,16 @@ class Register {
static Register function_context();
bool is_function_context() const;
+ // Returns the register for the incoming new target value.
+ static Register new_target();
+ bool is_new_target() const;
+
static Register FromOperand(uint8_t operand);
uint8_t ToOperand() const;
+ static Register FromWideOperand(uint16_t operand);
+ uint16_t ToWideOperand() const;
+
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = Register(),
Register reg4 = Register(),
@@ -320,14 +365,41 @@ class Bytecodes {
// Returns the size of |operand|.
static OperandSize SizeOfOperand(OperandType operand);
+ // Return true if the bytecode is a conditional jump taking
+ // an immediate byte operand (OperandType::kImm8).
+ static bool IsConditionalJumpImmediate(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx8).
+ static bool IsConditionalJumpConstant(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx16).
+ static bool IsConditionalJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // any kind of operand.
+ static bool IsConditionalJump(Bytecode bytecode);
+
// Return true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm8).
- static bool IsJump(Bytecode bytecode);
+ static bool IsJumpImmediate(Bytecode bytecode);
// Return true if the bytecode is a jump or conditional jump taking a
- // constant pool entry (OperandType::kIdx).
+ // constant pool entry (OperandType::kIdx8).
static bool IsJumpConstant(Bytecode bytecode);
+ // Return true if the bytecode is a jump or conditional jump taking a
+ // constant pool entry (OperandType::kIdx16).
+ static bool IsJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or conditional jump taking
+ // any kind of operand.
+ static bool IsJump(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump, a jump, or a return.
+ static bool IsJumpOrReturn(Bytecode bytecode);
+
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
int number_of_parameters);
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
new file mode 100644
index 0000000000..2586e1ff4d
--- /dev/null
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/constant-array-builder.h"
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
+ size_t start_index,
+ size_t capacity)
+ : start_index_(start_index),
+ capacity_(capacity),
+ reserved_(0),
+ constants_(zone) {}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
+ DCHECK_GT(available(), 0u);
+ reserved_++;
+ DCHECK_LE(reserved_, capacity() - constants_.size());
+}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
+ DCHECK_GT(reserved_, 0u);
+ reserved_--;
+}
+
+
+size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
+ Handle<Object> object) {
+ DCHECK_GT(available(), 0u);
+ size_t index = constants_.size();
+ DCHECK_LT(index, capacity());
+ constants_.push_back(object);
+ return index + start_index();
+}
+
+
+Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
+ size_t index) const {
+ return constants_[index - start_index()];
+}
+
+
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
+
+
+ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ idx8_slice_(zone, 0, kLowCapacity),
+ idx16_slice_(zone, kLowCapacity, kHighCapacity),
+ constants_map_(isolate->heap(), zone) {
+ STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
+ DCHECK_EQ(idx8_slice_.start_index(), 0u);
+ DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+}
+
+
+size_t ConstantArrayBuilder::size() const {
+ if (idx16_slice_.size() > 0) {
+ return idx16_slice_.start_index() + idx16_slice_.size();
+ } else {
+ return idx8_slice_.size();
+ }
+}
+
+
+Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+ if (index >= idx16_slice_.start_index()) {
+ return idx16_slice_.At(index);
+ } else if (index < idx8_slice_.size()) {
+ return idx8_slice_.At(index);
+ } else {
+ return isolate_->factory()->the_hole_value();
+ }
+}
+
+
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+ for (int i = 0; i < fixed_array->length(); i++) {
+ fixed_array->set(i, *At(static_cast<size_t>(i)));
+ }
+ return fixed_array;
+}
+
+
+size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+ index_t* entry = constants_map_.Find(object);
+ return (entry == nullptr) ? AllocateEntry(object) : *entry;
+}
+
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
+ Handle<Object> object) {
+ DCHECK(!object->IsOddball());
+ size_t index;
+ index_t* entry = constants_map_.Get(object);
+ if (idx8_slice_.available() > 0) {
+ index = idx8_slice_.Allocate(object);
+ } else {
+ index = idx16_slice_.Allocate(object);
+ }
+ CHECK_LT(index, kMaxCapacity);
+ *entry = static_cast<index_t>(index);
+ return *entry;
+}
+
+
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ if (idx8_slice_.available() > 0) {
+ idx8_slice_.Reserve();
+ return OperandSize::kByte;
+ } else if (idx16_slice_.available() > 0) {
+ idx16_slice_.Reserve();
+ return OperandSize::kShort;
+ } else {
+ UNREACHABLE();
+ return OperandSize::kNone;
+ }
+}
+
+
+size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
+ Handle<Object> object) {
+ DiscardReservedEntry(operand_size);
+ size_t index;
+ index_t* entry = constants_map_.Find(object);
+ if (nullptr == entry) {
+ index = AllocateEntry(object);
+ } else {
+ if (operand_size == OperandSize::kByte &&
+ *entry >= idx8_slice_.capacity()) {
+ // The object is already in the constant array, but has an index
+ // outside the range of an idx8 operand so we need to create a
+ // duplicate entry in the idx8 operand range to satisfy the
+ // commitment.
+ *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+ }
+ index = *entry;
+ }
+ DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
+ DCHECK_LT(index, kMaxCapacity);
+ return index;
+}
+
+
+void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kByte:
+ idx8_slice_.Unreserve();
+ return;
+ case OperandSize::kShort:
+ idx16_slice_.Unreserve();
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
new file mode 100644
index 0000000000..c882b1d540
--- /dev/null
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+
+#include "src/identity-map.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Factory;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing constant arrays for the interpreter.
+class ConstantArrayBuilder final : public ZoneObject {
+ public:
+ // Capacity of the 8-bit operand slice.
+ static const size_t kLowCapacity = 1u << kBitsPerByte;
+
+ // Capacity of the combined 8-bit and 16-bit operand slices.
+ static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+
+ // Capacity of the 16-bit operand slice.
+ static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+
+ ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+
+ // Generate a fixed array of constants based on inserted objects.
+ Handle<FixedArray> ToFixedArray(Factory* factory) const;
+
+  // Returns the object in the constant pool array that is at
+  // index |index|.
+ Handle<Object> At(size_t index) const;
+
+ // Returns the number of elements in the array.
+ size_t size() const;
+
+ // Insert an object into the constants array if it is not already
+ // present. Returns the array index associated with the object.
+ size_t Insert(Handle<Object> object);
+
+ // Creates a reserved entry in the constant pool and returns
+ // the size of the operand that'll be required to hold the entry
+ // when committed.
+ OperandSize CreateReservedEntry();
+
+ // Commit reserved entry and returns the constant pool index for the
+ // object.
+ size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+
+ // Discards constant pool reservation.
+ void DiscardReservedEntry(OperandSize operand_size);
+
+ private:
+ typedef uint16_t index_t;
+
+ index_t AllocateEntry(Handle<Object> object);
+
+ struct ConstantArraySlice final {
+ ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+ void Reserve();
+ void Unreserve();
+ size_t Allocate(Handle<Object> object);
+ Handle<Object> At(size_t index) const;
+
+ inline size_t available() const { return capacity() - reserved() - size(); }
+ inline size_t reserved() const { return reserved_; }
+ inline size_t capacity() const { return capacity_; }
+ inline size_t size() const { return constants_.size(); }
+ inline size_t start_index() const { return start_index_; }
+
+ private:
+ const size_t start_index_;
+ const size_t capacity_;
+ size_t reserved_;
+ ZoneVector<Handle<Object>> constants_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
+ };
+
+ Isolate* isolate_;
+ ConstantArraySlice idx8_slice_;
+ ConstantArraySlice idx16_slice_;
+ IdentityMap<index_t> constants_map_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 3ecabe4351..99066e8c7e 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -32,6 +32,13 @@ void BreakableControlFlowBuilder::EmitJumpIfTrue(
}
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfFalse(&sites->back());
+}
+
+
void BreakableControlFlowBuilder::EmitJumpIfUndefined(
ZoneVector<BytecodeLabel>* sites) {
sites->push_back(BytecodeLabel());
@@ -58,6 +65,12 @@ void BreakableControlFlowBuilder::EmitJumpIfTrue(
}
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites, int index) {
+ builder()->JumpIfFalse(&sites->at(index));
+}
+
+
void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
ZoneVector<BytecodeLabel>* sites) {
for (size_t i = 0; i < sites->size(); i++) {
@@ -68,9 +81,43 @@ void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
}
+void BlockBuilder::EndBlock() {
+ builder()->Bind(&block_end_);
+ SetBreakTarget(block_end_);
+}
+
+
LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
+void LoopBuilder::LoopHeader() {
+ // Jumps from before the loop header into the loop violate ordering
+ // requirements of bytecode basic blocks. The only entry into a loop
+  // must be the loop header. Surely breaks are okay? Not if nested
+ // and misplaced between the headers.
+ DCHECK(break_sites_.empty() && continue_sites_.empty());
+ builder()->Bind(&loop_header_);
+}
+
+
+void LoopBuilder::EndLoop() {
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ DCHECK(loop_header_.is_bound());
+ builder()->Bind(&loop_end_);
+ SetBreakTarget(loop_end_);
+ if (next_.is_bound()) {
+ DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
+ SetContinueTarget(next_);
+ } else {
+ DCHECK(condition_.is_bound());
+ DCHECK_GE(condition_.offset(), loop_header_.offset());
+ DCHECK_LE(condition_.offset(), loop_end_.offset());
+ SetContinueTarget(condition_);
+ }
+}
+
+
void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
BindLabels(target, &continue_sites_);
}
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index c9be6dcdc7..24a7dfe3e5 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -44,6 +44,7 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
// SetBreakTarget is called.
void Break() { EmitJump(&break_sites_); }
void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
+ void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
@@ -52,19 +53,33 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
- private:
// Unbound labels that identify jumps for break statements in the code.
ZoneVector<BytecodeLabel> break_sites_;
};
+
+// Class to track control flow for block statements (which can break in JS).
+class BlockBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit BlockBuilder(BytecodeArrayBuilder* builder)
+ : BreakableControlFlowBuilder(builder) {}
+
+ void EndBlock();
+
+ private:
+ BytecodeLabel block_end_;
+};
+
+
// A class to help with co-ordinating break and continue statements with
// their loop.
-// TODO(oth): add support for TF branch/merge info.
class LoopBuilder final : public BreakableControlFlowBuilder {
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
@@ -72,9 +87,12 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
continue_sites_(builder->zone()) {}
~LoopBuilder();
- // This methods should be called by the LoopBuilder owner before
- // destruction to update sites that emit jumps for continue.
- void SetContinueTarget(const BytecodeLabel& continue_target);
+ void LoopHeader();
+ void Condition() { builder()->Bind(&condition_); }
+ void Next() { builder()->Bind(&next_); }
+ void JumpToHeader() { builder()->Jump(&loop_header_); }
+ void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+ void EndLoop();
// This method is called when visiting continue statements in the AST.
// Inserts a jump to a unbound label that is patched when the corresponding
@@ -85,12 +103,19 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
private:
+ void SetContinueTarget(const BytecodeLabel& continue_target);
+
+ BytecodeLabel loop_header_;
+ BytecodeLabel condition_;
+ BytecodeLabel next_;
+ BytecodeLabel loop_end_;
+
// Unbound labels that identify jumps for continue statements in the code.
ZoneVector<BytecodeLabel> continue_sites_;
};
+
// A class to help with co-ordinating break statements with their switch.
-// TODO(oth): add support for TF branch/merge info.
class SwitchBuilder final : public BreakableControlFlowBuilder {
public:
explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index e089a5d475..574602b0ed 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -17,6 +17,7 @@ namespace internal {
namespace interpreter {
using compiler::Node;
+
#define __ assembler->
@@ -200,11 +201,47 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
+// Exchange <reg8> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
+ Node* reg0_index = __ BytecodeOperandReg(0);
+ Node* reg1_index = __ BytecodeOperandReg(1);
+ Node* reg0_value = __ LoadRegister(reg0_index);
+ Node* reg1_value = __ LoadRegister(reg1_index);
+ __ StoreRegister(reg1_value, reg0_index);
+ __ StoreRegister(reg0_value, reg1_index);
+ __ Dispatch();
+}
+
+
+// ExchangeWide <reg16> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
+ return DoExchange(assembler);
+}
+
+
+// Mov <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+ Node* src_index = __ BytecodeOperandReg(0);
+ Node* src_value = __ LoadRegister(src_index);
+ Node* dst_index = __ BytecodeOperandReg(1);
+ __ StoreRegister(src_value, dst_index);
+ __ Dispatch();
+}
+
+
void Interpreter::DoLoadGlobal(Callable ic,
compiler::InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
// Load the global via the LoadIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -216,7 +253,6 @@ void Interpreter::DoLoadGlobal(Callable ic,
Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
type_feedback_vector);
__ SetAccumulator(result);
-
__ Dispatch();
}
@@ -319,7 +355,9 @@ void Interpreter::DoStoreGlobal(Callable ic,
compiler::InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
// Store the global via the StoreIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -395,6 +433,15 @@ void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
}
+// LdaContextSlotWide <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaContextSlot(assembler);
+}
+
+
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
@@ -408,6 +455,120 @@ void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
}
+// StaContextSlotWide <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaContextSlot(assembler);
+}
+
+
+void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* result_pair = __ CallRuntime(function_id, context, name);
+ Node* result = __ Projection(0, result_pair);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// LdaLookupSlot <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+
+// LdaLookupSlotInsideTypeof <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeof(
+ compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+}
+
+
+// LdaLookupSlotWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlot(assembler);
+}
+
+
+// LdaLookupSlotInsideTypeofWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeofWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlotInsideTypeof(assembler);
+}
+
+
+void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
+ compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* language_mode_node = __ NumberConstant(language_mode);
+ Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
+ language_mode_node);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// StaLookupSlotSloppy <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+}
+
+
+// StaLookupSlotStrict <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+}
+
+
+// StaLookupSlotSloppyWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotSloppy(assembler);
+}
+
+
+// StaLookupSlotStrictWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotStrict(assembler);
+}
+
+
void Interpreter::DoLoadIC(Callable ic,
compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
@@ -882,22 +1043,51 @@ void Interpreter::DoDeletePropertySloppy(
}
-// Call <callable> <receiver> <arg_count>
+// DeleteLookupSlot
//
-// Call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+// Delete the variable with the name specified in the accumulator by dynamically
+// looking it up.
+void Interpreter::DoDeleteLookupSlot(
+ compiler::InterpreterAssembler* assembler) {
+ Node* name = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
Node* function_reg = __ BytecodeOperandReg(0);
Node* function = __ LoadRegister(function_reg);
Node* receiver_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(receiver_reg);
Node* args_count = __ BytecodeOperandCount(2);
+ // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
Node* result = __ CallJS(function, first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
+// Call <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
+// CallWide <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
@@ -914,6 +1104,33 @@ void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
}
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPair(
+ compiler::InterpreterAssembler* assembler) {
+ // Call the runtime function.
+ Node* function_id = __ BytecodeOperandIdx(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
+
+ // Store the results in <first_return> and <first_return + 1>
+ Node* first_return_reg = __ BytecodeOperandReg(3);
+ Node* second_return_reg = __ NextRegister(first_return_reg);
+ Node* result0 = __ Projection(0, result_pair);
+ Node* result1 = __ Projection(1, result_pair);
+ __ StoreRegister(result0, first_return_reg);
+ __ StoreRegister(result1, second_return_reg);
+
+ __ Dispatch();
+}
+
+
// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
@@ -926,9 +1143,8 @@ void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
// Get the function to call from the native context.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
Node* native_context =
- __ LoadObjectField(global, JSGlobalObject::kNativeContextOffset);
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
Node* function = __ LoadContextSlot(native_context, context_index);
// Call the function.
@@ -938,7 +1154,7 @@ void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
}
-// New <constructor> <arg_count>
+// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
@@ -1045,17 +1261,6 @@ void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
}
-// ToBoolean
-//
-// Cast the object referenced by the accumulator to a boolean.
-void Interpreter::DoToBoolean(compiler::InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-
// ToName
//
// Cast the object referenced by the accumulator to a name.
@@ -1098,9 +1303,9 @@ void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
}
-// JumpConstant <idx>
+// JumpConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -1109,6 +1314,16 @@ void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
}
+// JumpConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the
+// constant pool.
+void Interpreter::DoJumpConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpConstant(assembler);
+}
+
+
// JumpIfTrue <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
@@ -1121,9 +1336,9 @@ void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfTrueConstant <idx>
+// JumpIfTrueConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1136,6 +1351,16 @@ void Interpreter::DoJumpIfTrueConstant(
}
+// JumpIfTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfTrueConstant(assembler);
+}
+
+
// JumpIfFalse <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
@@ -1148,9 +1373,9 @@ void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfFalseConstant <idx>
+// JumpIfFalseConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1163,6 +1388,16 @@ void Interpreter::DoJumpIfFalseConstant(
}
+// JumpIfFalseConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfFalseConstant(assembler);
+}
+
+
// JumpIfToBooleanTrue <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1170,17 +1405,17 @@ void Interpreter::DoJumpIfFalseConstant(
void Interpreter::DoJumpIfToBooleanTrue(
compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
Node* to_boolean_value =
__ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}
-// JumpIfToBooleanTrueConstant <idx>
+// JumpIfToBooleanTrueConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
@@ -1196,6 +1431,17 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
}
+// JumpIfToBooleanTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is true when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanTrueConstant(assembler);
+}
+
+
// JumpIfToBooleanFalse <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1203,17 +1449,17 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
void Interpreter::DoJumpIfToBooleanFalse(
compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
Node* to_boolean_value =
__ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}
-// JumpIfToBooleanFalseConstant <idx>
+// JumpIfToBooleanFalseConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
@@ -1229,6 +1475,17 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
}
+// JumpIfToBooleanFalseConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is false when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanFalseConstant(assembler);
+}
+
+
// JumpIfNull <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1241,9 +1498,9 @@ void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfNullConstant <idx>
+// JumpIfNullConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1256,7 +1513,17 @@ void Interpreter::DoJumpIfNullConstant(
}
-// JumpIfUndefined <imm8>
+// JumpIfNullConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfNullConstant(assembler);
+}
+
+
+// JumpIfUndefined <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
@@ -1269,9 +1536,9 @@ void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfUndefinedConstant <idx>
+// JumpIfUndefinedConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1285,73 +1552,102 @@ void Interpreter::DoJumpIfUndefinedConstant(
}
-// CreateRegExpLiteral <idx> <flags_reg>
+// JumpIfUndefinedConstantWide <idx16>
//
-// Creates a regular expression literal for literal index <idx> with flags held
-// in <flags_reg> and the pattern in the accumulator.
-void Interpreter::DoCreateRegExpLiteral(
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstantWide(
compiler::InterpreterAssembler* assembler) {
- Node* pattern = __ GetAccumulator();
- Node* literal_index_raw = __ BytecodeOperandIdx(0);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_reg = __ BytecodeOperandReg(1);
- Node* flags = __ LoadRegister(flags_reg);
- Node* closure = __ LoadRegister(Register::function_closure());
- Node* literals_array =
- __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* result = __ CallRuntime(Runtime::kMaterializeRegExpLiteral,
- literals_array, literal_index, pattern, flags);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoJumpIfUndefinedConstant(assembler);
}
void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler) {
- Node* constant_elements = __ GetAccumulator();
- Node* literal_index_raw = __ BytecodeOperandIdx(0);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandImm(1);
+ Node* flags_raw = __ BytecodeOperandImm(2);
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
- Node* literals_array =
- __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* result = __ CallRuntime(function_id, literals_array, literal_index,
+ Node* result = __ CallRuntime(function_id, closure, literal_index,
constant_elements, flags);
__ SetAccumulator(result);
__ Dispatch();
}
-// CreateArrayLiteral <idx> <flags>
+// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
-// Creates an array literal for literal index <idx> with flags <flags> and
-// constant elements in the accumulator.
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
+//
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateArrayLiteral <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(
compiler::InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
-// CreateObjectLiteral <idx> <flags>
+// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateArrayLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+}
+
+
+// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
-// Creates an object literal for literal index <idx> with flags <flags> and
-// constant elements in the accumulator.
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(
compiler::InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
-// CreateClosure <tenured>
+// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateObjectLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+}
+
+
+// CreateClosure <index> <tenured>
//
-// Creates a new closure for SharedFunctionInfo in the accumulator with the
-// PretenureFlag <tenured>.
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
// TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
// calling into the runtime.
- Node* shared = __ GetAccumulator();
- Node* tenured_raw = __ BytecodeOperandImm(0);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* shared = __ LoadConstantPoolEntry(index);
+ Node* tenured_raw = __ BytecodeOperandImm(1);
Node* tenured = __ SmiTag(tenured_raw);
Node* result =
__ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
@@ -1360,6 +1656,16 @@ void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
}
+// CreateClosureWide <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosureWide(
+ compiler::InterpreterAssembler* assembler) {
+ return DoCreateClosure(assembler);
+}
+
+
// CreateMappedArguments
//
// Creates a new mapped arguments object.
@@ -1403,33 +1709,36 @@ void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
}
-// ForInPrepare <receiver>
+// ForInPrepare <cache_type> <cache_array> <cache_length>
//
-// Returns state for for..in loop execution based on the |receiver| and
-// the property names in the accumulator.
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The registers |cache_type|, |cache_array|, and
+// |cache_length| represent output parameters.
void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
- Node* receiver_reg = __ BytecodeOperandReg(0);
- Node* receiver = __ LoadRegister(receiver_reg);
- Node* property_names = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, receiver,
- property_names);
+ Node* object = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+ for (int i = 0; i < 3; i++) {
+ // 0 == cache_type, 1 == cache_array, 2 == cache_length
+ Node* cache_info = __ LoadFixedArrayElement(result, i);
+ Node* cache_info_reg = __ BytecodeOperandReg(i);
+ __ StoreRegister(cache_info, cache_info_reg);
+ }
__ SetAccumulator(result);
__ Dispatch();
}
-// ForInNext <for_in_state> <index>
+// ForInNext <receiver> <cache_type> <cache_array> <index>
//
-// Returns the next key in a for..in loop. The state associated with the
-// iteration is contained in |for_in_state| and |index| is the current
-// zero-based iteration count.
+// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
- Node* for_in_state_reg = __ BytecodeOperandReg(0);
- Node* for_in_state = __ LoadRegister(for_in_state_reg);
- Node* receiver = __ LoadFixedArrayElement(for_in_state, 0);
- Node* cache_array = __ LoadFixedArrayElement(for_in_state, 1);
- Node* cache_type = __ LoadFixedArrayElement(for_in_state, 2);
- Node* index_reg = __ BytecodeOperandReg(1);
+ Node* receiver_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(receiver_reg);
+ Node* cache_type_reg = __ BytecodeOperandReg(1);
+ Node* cache_type = __ LoadRegister(cache_type_reg);
+ Node* cache_array_reg = __ BytecodeOperandReg(2);
+ Node* cache_array = __ LoadRegister(cache_array_reg);
+ Node* index_reg = __ BytecodeOperandReg(3);
Node* index = __ LoadRegister(index_reg);
Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
cache_type, index);
@@ -1438,22 +1747,34 @@ void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
}
-// ForInDone <for_in_state>
+// ForInDone <index> <cache_length>
//
-// Returns the next key in a for..in loop. The accumulator contains the current
-// zero-based iteration count and |for_in_state| is the state returned by an
-// earlier invocation of ForInPrepare.
+// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
- Node* index = __ GetAccumulator();
- Node* for_in_state_reg = __ BytecodeOperandReg(0);
- Node* for_in_state = __ LoadRegister(for_in_state_reg);
- Node* cache_length = __ LoadFixedArrayElement(for_in_state, 3);
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* cache_length_reg = __ BytecodeOperandReg(1);
+ Node* cache_length = __ LoadRegister(cache_length_reg);
Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
__ SetAccumulator(result);
__ Dispatch();
}
+// ForInStep <index>
+//
+// Increments the loop counter in register |index| and stores the result
+// in the accumulator.
+void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* result = __ CallRuntime(Runtime::kForInStep, index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 560aba19d7..ef9b5d1fe3 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -11,8 +11,8 @@
#include "src/base/macros.h"
#include "src/builtins.h"
#include "src/interpreter/bytecodes.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/token.h"
namespace v8 {
namespace internal {
@@ -84,6 +84,9 @@ class Interpreter {
// Generates code to perform a keyed property store via |ic|.
void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ // Generates code to perform a JS call.
+ void DoJSCall(compiler::InterpreterAssembler* assembler);
+
   // Generates code to create a literal via |function_id|.
void DoCreateLiteral(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
@@ -92,6 +95,14 @@ class Interpreter {
void DoDelete(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
+ // Generates code to perform a lookup slot load via |function_id|.
+ void DoLoadLookupSlot(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a lookup slot store depending on |language_mode|.
+ void DoStoreLookupSlot(LanguageMode language_mode,
+ compiler::InterpreterAssembler* assembler);
+
bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
Isolate* isolate_;