author    Michaël Zasso <targos@protonmail.com>  2017-03-21 10:16:54 +0100
committer Michaël Zasso <targos@protonmail.com>  2017-03-25 09:44:10 +0100
commit    c459d8ea5d402c702948c860d9497b2230ff7e8a (patch)
tree      56c282fc4d40e5cb613b47cf7be3ea0526ed5b6f /deps/v8/src/interpreter
parent    e0bc5a7361b1d29c3ed034155fd779ce6f44fb13 (diff)
deps: update V8 to 5.7.492.69
PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Diffstat (limited to 'deps/v8/src/interpreter')
 deps/v8/src/interpreter/OWNERS                            |    1
 deps/v8/src/interpreter/bytecode-array-accessor.cc        |  205
 deps/v8/src/interpreter/bytecode-array-accessor.h         |   76
 deps/v8/src/interpreter/bytecode-array-builder.cc         |  116
 deps/v8/src/interpreter/bytecode-array-builder.h          |   51
 deps/v8/src/interpreter/bytecode-array-iterator.cc        |  175
 deps/v8/src/interpreter/bytecode-array-iterator.h         |   49
 deps/v8/src/interpreter/bytecode-array-random-iterator.cc |   37
 deps/v8/src/interpreter/bytecode-array-random-iterator.h  |   78
 deps/v8/src/interpreter/bytecode-array-writer.cc          |   11
 deps/v8/src/interpreter/bytecode-flags.cc                 |    8
 deps/v8/src/interpreter/bytecode-generator.cc             |  459
 deps/v8/src/interpreter/bytecode-generator.h              |   13
 deps/v8/src/interpreter/bytecode-label.cc                 |    1
 deps/v8/src/interpreter/bytecode-label.h                  |    4
 deps/v8/src/interpreter/bytecode-operands.h               |   55
 deps/v8/src/interpreter/bytecode-peephole-optimizer.cc    |   73
 deps/v8/src/interpreter/bytecode-peephole-table.h         |   21
 deps/v8/src/interpreter/bytecode-pipeline.h               |  138
 deps/v8/src/interpreter/bytecode-register-optimizer.cc    |   34
 deps/v8/src/interpreter/bytecode-register-optimizer.h     |   27
 deps/v8/src/interpreter/bytecodes.h                       |  221
 deps/v8/src/interpreter/constant-array-builder.cc         |   34
 deps/v8/src/interpreter/constant-array-builder.h          |    5
 deps/v8/src/interpreter/control-flow-builders.cc          |    7
 deps/v8/src/interpreter/control-flow-builders.h           |   19
 deps/v8/src/interpreter/handler-table-builder.h           |    2
 deps/v8/src/interpreter/interpreter-assembler.cc          |  217
 deps/v8/src/interpreter/interpreter-assembler.h           |   50
 deps/v8/src/interpreter/interpreter-intrinsics.cc         |   33
 deps/v8/src/interpreter/interpreter-intrinsics.h          |   37
 deps/v8/src/interpreter/interpreter.cc                    | 1052
 deps/v8/src/interpreter/interpreter.h                     |    9
 deps/v8/src/interpreter/mkpeephole.cc                     |   22
 34 files changed, 2280 insertions(+), 1060 deletions(-)
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 4e6a721fe0..0f2165c647 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,6 +1,7 @@
set noparent
bmeurer@chromium.org
+leszeks@chromium.org
mstarzinger@chromium.org
mythria@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
new file mode 100644
index 0000000000..8e6a732861
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -0,0 +1,205 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-accessor.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayAccessor::BytecodeArrayAccessor(
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(bytecode_array),
+ bytecode_offset_(initial_offset),
+ operand_scale_(OperandScale::kSingle),
+ prefix_offset_(0) {
+ UpdateOperandScale();
+}
+
+void BytecodeArrayAccessor::SetOffset(int offset) {
+ bytecode_offset_ = offset;
+ UpdateOperandScale();
+}
+
+void BytecodeArrayAccessor::UpdateOperandScale() {
+ if (OffsetInBounds()) {
+ uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+ operand_scale_ =
+ Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+ prefix_offset_ = 1;
+ } else {
+ operand_scale_ = OperandScale::kSingle;
+ prefix_offset_ = 0;
+ }
+ }
+}
+
+bool BytecodeArrayAccessor::OffsetInBounds() const {
+ return bytecode_offset_ >= 0 && bytecode_offset_ < bytecode_array()->length();
+}
+
+Bytecode BytecodeArrayAccessor::current_bytecode() const {
+ DCHECK(OffsetInBounds());
+ uint8_t current_byte =
+ bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+ return current_bytecode;
+}
+
+int BytecodeArrayAccessor::current_bytecode_size() const {
+ return current_prefix_offset() +
+ Bytecodes::Size(current_bytecode(), current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+int32_t BytecodeArrayAccessor::GetSignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetFlagOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kFlag8);
+ return GetUnsignedOperand(operand_index, OperandType::kFlag8);
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedImmediateOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kUImm);
+ return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
+int32_t BytecodeArrayAccessor::GetImmediateOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kImm);
+ return GetSignedOperand(operand_index, OperandType::kImm);
+}
+
+uint32_t BytecodeArrayAccessor::GetRegisterCountOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kRegCount);
+ return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kIdx);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
+Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(current_bytecode());
+ OperandType operand_type = operand_types[operand_index];
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ if (operand_type == OperandType::kRegList) {
+ return GetRegisterCountOperand(operand_index + 1);
+ } else {
+ return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+ }
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kRuntimeId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIntrinsicId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return IntrinsicsHelper::ToRuntimeId(
+ static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
+}
+
+Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index) const {
+ return FixedArray::get(bytecode_array()->constant_pool(),
+ GetIndexOperand(operand_index),
+ bytecode_array()->GetIsolate());
+}
+
+int BytecodeArrayAccessor::GetJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetImmediateOperand(0);
+ return current_offset() + relative_offset + current_prefix_offset();
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ return current_offset() + smi->value() + current_prefix_offset();
+ } else {
+ UNREACHABLE();
+ return kMinInt;
+ }
+}
+
+bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
+ return current_offset() <= offset &&
+ offset < current_offset() + current_bytecode_size();
+}
+
+std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
+ return BytecodeDecoder::Decode(
+ os, bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_,
+ bytecode_array()->parameter_count());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
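The jump arithmetic above resolves a relative jump to an absolute offset as current_offset() plus the relative operand plus the prefix size, where a scaling prefix bytecode widens the operand encoding. Below is a minimal, self-contained sketch of that formula; the kWide/kJump opcodes and the little-endian operand encoding are invented for illustration and are not V8's.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  enum : uint8_t { kJump = 0x01, kWide = 0xFE };
  // Stream: a kWide prefix scales kJump's operand from one byte to two.
  // The 16-bit little-endian relative operand is 0x0104 = +260.
  const std::vector<uint8_t> stream = {kWide, kJump, 0x04, 0x01};

  const int current_offset = 0;
  const int prefix_offset = (stream[current_offset] == kWide) ? 1 : 0;
  const size_t operand_start = current_offset + prefix_offset + 1;

  const int relative =
      prefix_offset ? stream[operand_start] | (stream[operand_start + 1] << 8)
                    : stream[operand_start];

  // Mirrors GetJumpTargetOffset(): offset + relative operand + prefix size.
  std::cout << current_offset + relative + prefix_offset << "\n";  // 261
  return 0;
}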
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
new file mode 100644
index 0000000000..e5a24f3e7f
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
+ public:
+ BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
+ int initial_offset);
+
+ void SetOffset(int offset);
+
+ Bytecode current_bytecode() const;
+ int current_bytecode_size() const;
+ int current_offset() const { return bytecode_offset_; }
+ OperandScale current_operand_scale() const { return operand_scale_; }
+ int current_prefix_offset() const { return prefix_offset_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+
+ uint32_t GetFlagOperand(int operand_index) const;
+ uint32_t GetUnsignedImmediateOperand(int operand_index) const;
+ int32_t GetImmediateOperand(int operand_index) const;
+ uint32_t GetIndexOperand(int operand_index) const;
+ uint32_t GetRegisterCountOperand(int operand_index) const;
+ Register GetRegisterOperand(int operand_index) const;
+ int GetRegisterOperandRange(int operand_index) const;
+ Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+ Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+
+ // Returns the absolute offset of the branch target at the current
+ // bytecode. It is an error to call this method if the bytecode is
+ // not for a jump or conditional jump.
+ int GetJumpTargetOffset() const;
+
+ bool OffsetWithinBytecode(int offset) const;
+
+ std::ostream& PrintTo(std::ostream& os) const;
+
+ private:
+ bool OffsetInBounds() const;
+
+ uint32_t GetUnsignedOperand(int operand_index,
+ OperandType operand_type) const;
+ int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+ void UpdateOperandScale();
+
+ Handle<BytecodeArray> bytecode_array_;
+ int bytecode_offset_;
+ OperandScale operand_scale_;
+ int prefix_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayAccessor);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 904a8e021d..58d7d6df41 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -143,7 +143,8 @@ class OperandHelper {};
template <> \
class OperandHelper<OperandType::k##Name> \
: public UnsignedOperandHelper<Type> {};
-UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
#undef DEFINE_UNSIGNED_OPERAND_HELPER
template <>
@@ -211,14 +212,15 @@ class OperandHelper<OperandType::kRegOutTriple> {
} // namespace
-template <OperandType... operand_types>
+template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
class BytecodeNodeBuilder {
public:
template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
BytecodeSourceInfo source_info,
- Bytecode bytecode, Operands... operands)) {
- builder->PrepareToOutputBytecode(bytecode);
+ Operands... operands)) {
+ builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
// BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
@@ -226,30 +228,34 @@ class BytecodeNodeBuilder {
// the code will expand into:
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
- return BytecodeNode(
- bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
- source_info);
+ return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
+ source_info,
+ OperandHelper<operand_types>::Convert(builder, operands)...);
}
};
-#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(Operands... operands) { \
- BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
- this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
- operands...)); \
- pipeline()->Write(&node); \
- } \
- \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
- Operands... operands) { \
- DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
- BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
- this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
- operands...)); \
- pipeline()->WriteJump(&node, label); \
- LeaveBasicBlock(); \
+#define DEFINE_BYTECODE_OUTPUT(name, ...) \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
+ "too many operands for bytecode"); \
+ BytecodeNode node( \
+ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
+ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+ operands...)); \
+ pipeline()->Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node( \
+ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
+ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+ operands...)); \
+ pipeline()->WriteJump(&node, label); \
+ LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
@@ -318,6 +324,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetSuperConstructor(Register out) {
+ OutputGetSuperConstructor(out);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
Token::Value op, Register reg, int feedback_slot) {
switch (op) {
@@ -433,13 +444,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
- TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
+ const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+ size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaGlobalInsideTypeof(feedback_slot);
+ OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- OutputLdaGlobal(feedback_slot);
+ OutputLdaGlobal(name_index, feedback_slot);
}
return *this;
}
@@ -541,6 +553,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
+ Register object, Register name, DataPropertyInLiteralFlags flags,
+ int feedback_slot) {
+ OutputStaDataPropertyInLiteral(object, name, flags, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, const Handle<Name> name, int feedback_slot,
LanguageMode language_mode) {
@@ -566,9 +585,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
- int flags) {
- OutputCreateClosure(entry, flags);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+ size_t shared_function_info_entry, int slot, int flags) {
+ OutputCreateClosure(shared_function_info_entry, slot, flags);
return *this;
}
@@ -592,6 +611,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEvalContext(int slots) {
+ OutputCreateEvalContext(slots);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
Register object, Handle<ScopeInfo> scope_info) {
size_t scope_info_index = GetConstantPoolEntry(scope_info);
@@ -625,16 +649,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
- Handle<FixedArray> constant_elements, int literal_index, int flags) {
- size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+ size_t constant_elements_entry, int literal_index, int flags) {
OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags,
+ size_t constant_properties_entry, int literal_index, int flags,
Register output) {
- size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
output);
return *this;
@@ -718,6 +740,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
+ BytecodeLabel* label) {
+ OutputJumpIfJSReceiver(label, 0);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
int loop_depth) {
OutputJumpLoop(label, 0, loop_depth);
@@ -742,6 +770,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetPendingMessage() {
+ OutputSetPendingMessage();
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
OutputThrow();
return *this;
@@ -914,6 +947,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::NewWithSpread(RegisterList args) {
+ OutputNewWithSpread(args, args.register_count());
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
@@ -975,8 +1013,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
}
}
-void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
- if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+template <Bytecode bytecode, AccumulatorUse accumulator_use>
+void BytecodeArrayBuilder::PrepareToOutputBytecode() {
+ if (register_optimizer_)
+ register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
}
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
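The pack-expansion comment in the hunk above is the heart of the templated BytecodeNodeBuilder: the OperandType... and Operands... parameter packs expand in lockstep, so each argument is converted by the OperandHelper specialization matching its declared operand type. A standalone sketch of the same pattern, with invented stand-in types and conversions:

#include <iostream>

enum class OperandType { kReg, kImm };

template <OperandType>
struct OperandHelper;

template <>
struct OperandHelper<OperandType::kReg> {
  static int Convert(int reg) { return reg * 2; }  // stand-in conversion
};

template <>
struct OperandHelper<OperandType::kImm> {
  static int Convert(int imm) { return imm + 1; }  // stand-in conversion
};

template <OperandType... operand_types>
struct NodeBuilder {
  template <typename... Operands>
  static void Make(Operands... operands) {
    // Both packs expand together: Convert<kReg>(reg), Convert<kImm>(imm), ...
    const int converted[] = {
        OperandHelper<operand_types>::Convert(operands)...};
    for (int v : converted) std::cout << v << " ";
    std::cout << "\n";
  }
};

int main() {
  NodeBuilder<OperandType::kReg, OperandType::kImm>::Make(3, 10);  // 6 11
  return 0;
}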
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index cc5b5e782b..121b84d523 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -84,7 +84,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& LoadFalse();
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
+ BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
+ TypeofMode typeof_mode);
BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
@@ -121,6 +122,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Keyed load property. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
+ // Store properties. Flag for NeedsSetFunctionName() should
+ // be in the accumulator.
+ BytecodeArrayBuilder& StoreDataPropertyInLiteral(
+ Register object, Register name, DataPropertyInLiteralFlags flags,
+ int feedback_slot);
+
// Store properties. The value to be stored should be in the accumulator.
BytecodeArrayBuilder& StoreNamedProperty(Register object,
const Handle<Name> name,
@@ -153,8 +160,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
LanguageMode language_mode);
// Create a new closure for a SharedFunctionInfo which will be inserted at
- // constant pool index |entry|.
- BytecodeArrayBuilder& CreateClosure(size_t entry, int flags);
+ // constant pool index |shared_function_info_entry|.
+ BytecodeArrayBuilder& CreateClosure(size_t shared_function_info_entry,
+ int slot, int flags);
// Create a new local context for a |scope_info| and a closure which should be
// in the accumulator.
@@ -169,6 +177,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Create a new context with size |slots|.
BytecodeArrayBuilder& CreateFunctionContext(int slots);
+ // Create a new eval context with size |slots|.
+ BytecodeArrayBuilder& CreateEvalContext(int slots);
+
// Creates a new context with the given |scope_info| for a with-statement
// with the |object| in a register and the closure in the accumulator.
BytecodeArrayBuilder& CreateWithContext(Register object,
@@ -180,11 +191,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Literals creation. Constant elements should be in the accumulator.
BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
int literal_index, int flags);
- BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+ BytecodeArrayBuilder& CreateArrayLiteral(size_t constant_elements_entry,
int literal_index, int flags);
- BytecodeArrayBuilder& CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags,
- Register output);
+ BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
+ int literal_index, int flags,
+ Register output);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -232,6 +243,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Call the JS runtime function with |context_index| and arguments |args|.
BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
+ // Call the constructor in |args[0]| with new_target in |args[1]| and the
+ // arguments starting at |args[2]|. The final argument must be a spread.
+ BytecodeArrayBuilder& NewWithSpread(RegisterList args);
+
// Operators (register holds the lhs value, accumulator holds the rhs value).
// Type feedback will be recorded in the |feedback_slot|
BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
@@ -245,6 +261,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& LogicalNot();
BytecodeArrayBuilder& TypeOf();
+ // Expects a heap object in the accumulator. Returns its super constructor in
+ // the register |out| if it passes the IsConstructor test. Otherwise, it
+ // throws a TypeError exception.
+ BytecodeArrayBuilder& GetSuperConstructor(Register out);
+
// Deletes property from an object. This expects that accumulator contains
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
@@ -266,12 +287,17 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfJSReceiver(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
BytecodeArrayBuilder& StackCheck(int position);
+ // Sets the pending message to the value in the accumulator, and returns the
+ // previous pending message in the accumulator.
+ BytecodeArrayBuilder& SetPendingMessage();
+
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
@@ -302,6 +328,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
// Allocates a slot in the constant pool which can later be inserted.
size_t AllocateConstantPoolEntry();
// Inserts an entry into an allocated constant pool entry.
@@ -347,7 +375,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
private:
friend class BytecodeRegisterAllocator;
- template <OperandType... operand_types>
+ template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
friend class BytecodeNodeBuilder;
// Returns the current source position for the given |bytecode|.
@@ -367,15 +396,13 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Set position for return.
void SetReturnPosition();
- // Gets a constant pool entry for the |object|.
- size_t GetConstantPoolEntry(Handle<Object> object);
-
// Not implemented, as the illegal bytecode is only used internally
// to indicate that a bytecode field is not valid or that an error has
// occurred during bytecode generation.
BytecodeArrayBuilder& Illegal();
- void PrepareToOutputBytecode(Bytecode bytecode);
+ template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ void PrepareToOutputBytecode();
void LeaveBasicBlock() { return_seen_in_block_ = false; }
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index e596b11a05..0248dfda46 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -3,9 +3,6 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-iterator.h"
-
-#include "src/interpreter/bytecode-decoder.h"
-#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -14,180 +11,14 @@ namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
- : bytecode_array_(bytecode_array),
- bytecode_offset_(0),
- operand_scale_(OperandScale::kSingle),
- prefix_offset_(0) {
- UpdateOperandScale();
-}
+ : BytecodeArrayAccessor(bytecode_array, 0) {}
void BytecodeArrayIterator::Advance() {
- bytecode_offset_ += current_bytecode_size();
- UpdateOperandScale();
-}
-
-void BytecodeArrayIterator::UpdateOperandScale() {
- if (!done()) {
- uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
- operand_scale_ =
- Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
- prefix_offset_ = 1;
- } else {
- operand_scale_ = OperandScale::kSingle;
- prefix_offset_ = 0;
- }
- }
+ SetOffset(current_offset() + current_bytecode_size());
}
bool BytecodeArrayIterator::done() const {
- return bytecode_offset_ >= bytecode_array()->length();
-}
-
-Bytecode BytecodeArrayIterator::current_bytecode() const {
- DCHECK(!done());
- uint8_t current_byte =
- bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
- return current_bytecode;
-}
-
-int BytecodeArrayIterator::current_bytecode_size() const {
- return current_prefix_offset() +
- Bytecodes::Size(current_bytecode(), current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-int32_t BytecodeArrayIterator::GetSignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kFlag8);
- return GetUnsignedOperand(operand_index, OperandType::kFlag8);
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kUImm);
- return GetUnsignedOperand(operand_index, OperandType::kUImm);
-}
-
-int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kImm);
- return GetSignedOperand(operand_index, OperandType::kImm);
-}
-
-uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kRegCount);
- return GetUnsignedOperand(operand_index, OperandType::kRegCount);
-}
-
-uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kIdx);
- return GetUnsignedOperand(operand_index, operand_type);
-}
-
-Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
- DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- const OperandType* operand_types =
- Bytecodes::GetOperandTypes(current_bytecode());
- OperandType operand_type = operand_types[operand_index];
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- if (operand_type == OperandType::kRegList) {
- return GetRegisterCountOperand(operand_index + 1);
- } else {
- return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
- }
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kRuntimeId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return static_cast<Runtime::FunctionId>(raw_id);
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kIntrinsicId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return IntrinsicsHelper::ToRuntimeId(
- static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
-}
-
-Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
- int operand_index) const {
- return FixedArray::get(bytecode_array()->constant_pool(),
- GetIndexOperand(operand_index),
- bytecode_array()->GetIsolate());
-}
-
-
-int BytecodeArrayIterator::GetJumpTargetOffset() const {
- Bytecode bytecode = current_bytecode();
- if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
- int relative_offset = GetImmediateOperand(0);
- return current_offset() + relative_offset + current_prefix_offset();
- } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
- return current_offset() + smi->value() + current_prefix_offset();
- } else {
- UNREACHABLE();
- return kMinInt;
- }
+ return current_offset() >= bytecode_array()->length();
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 03279cbd43..7ec9d1288c 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -1,64 +1,25 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/interpreter/bytecode-register.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
-#include "src/runtime/runtime.h"
+#include "src/interpreter/bytecode-array-accessor.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class V8_EXPORT_PRIVATE BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator final
+ : public BytecodeArrayAccessor {
public:
explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
void Advance();
bool done() const;
- Bytecode current_bytecode() const;
- int current_bytecode_size() const;
- int current_offset() const { return bytecode_offset_; }
- OperandScale current_operand_scale() const { return operand_scale_; }
- int current_prefix_offset() const { return prefix_offset_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
-
- uint32_t GetFlagOperand(int operand_index) const;
- uint32_t GetUnsignedImmediateOperand(int operand_index) const;
- int32_t GetImmediateOperand(int operand_index) const;
- uint32_t GetIndexOperand(int operand_index) const;
- uint32_t GetRegisterCountOperand(int operand_index) const;
- Register GetRegisterOperand(int operand_index) const;
- int GetRegisterOperandRange(int operand_index) const;
- Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
- Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
- Handle<Object> GetConstantForIndexOperand(int operand_index) const;
-
- // Returns the absolute offset of the branch target at the current
- // bytecode. It is an error to call this method if the bytecode is
- // not for a jump or conditional jump.
- int GetJumpTargetOffset() const;
private:
- uint32_t GetUnsignedOperand(int operand_index,
- OperandType operand_type) const;
- int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
-
- void UpdateOperandScale();
-
- Handle<BytecodeArray> bytecode_array_;
- int bytecode_offset_;
- OperandScale operand_scale_;
- int prefix_offset_;
-
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
};
@@ -66,4 +27,4 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_GRAPH_ITERATOR_H_
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
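With the refactor, BytecodeArrayIterator keeps only its traversal API (Advance() and done()) and inherits all operand decoding from BytecodeArrayAccessor, so the usual forward walk is unchanged. A usage sketch, assuming a V8 checkout and an existing Handle<BytecodeArray> bytecode_array (not compilable standalone; every member used here appears in the headers in this diff):

// Walk the array bytecode by bytecode, printing one disassembly line each.
BytecodeArrayIterator iterator(bytecode_array);
while (!iterator.done()) {
  std::cout << iterator.current_offset() << ": ";
  iterator.PrintTo(std::cout) << "\n";
  iterator.Advance();
}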
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
new file mode 100644
index 0000000000..f499887ccb
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
+ Handle<BytecodeArray> bytecode_array, Zone* zone)
+ : BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+ // Run forwards through the bytecode array to determine the offset of each
+ // bytecode.
+ while (current_offset() < bytecode_array->length()) {
+ offsets_.push_back(current_offset());
+ SetOffset(current_offset() + current_bytecode_size());
+ }
+ GoToStart();
+}
+
+bool BytecodeArrayRandomIterator::IsValid() const {
+ return current_index_ >= 0 &&
+ static_cast<size_t>(current_index_) < offsets_.size();
+}
+
+void BytecodeArrayRandomIterator::UpdateOffsetFromIndex() {
+ if (IsValid()) {
+ SetOffset(offsets_[current_index_]);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
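The constructor above buys random access over a variable-length encoding with one forward pass: it records the starting offset of every bytecode so GoToIndex() can later reach any of them in O(1). A self-contained analogue, using an invented one-byte length-prefix record format in place of real bytecodes:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Each record is a length byte followed by that many payload bytes.
  const std::vector<uint8_t> stream = {2, 10, 11, 1, 20, 3, 30, 31, 32};

  // One forward pass, as in the constructor: remember each record's start.
  std::vector<int> offsets;
  for (size_t offset = 0; offset < stream.size();
       offset += 1 + stream[offset]) {
    offsets.push_back(static_cast<int>(offset));
  }

  // Random access by index, like GoToIndex()/UpdateOffsetFromIndex().
  const int index = 2;
  std::cout << "record " << index << " starts at offset " << offsets[index]
            << "\n";  // offset 5
  return 0;
}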
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
new file mode 100644
index 0000000000..7d559ea176
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+
+#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
+ : public BytecodeArrayAccessor {
+ public:
+ explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array,
+ Zone* zone);
+
+ BytecodeArrayRandomIterator& operator++() {
+ ++current_index_;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+ BytecodeArrayRandomIterator& operator--() {
+ --current_index_;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ BytecodeArrayRandomIterator& operator+=(int offset) {
+ current_index_ += offset;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ BytecodeArrayRandomIterator& operator-=(int offset) {
+ current_index_ -= offset;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ int current_index() const { return current_index_; }
+
+ size_t size() const { return offsets_.size(); }
+
+ void GoToIndex(int index) {
+ current_index_ = index;
+ UpdateOffsetFromIndex();
+ }
+ void GoToStart() {
+ current_index_ = 0;
+ UpdateOffsetFromIndex();
+ }
+ void GoToEnd() {
+ DCHECK_LT(offsets_.size() - 1, static_cast<size_t>(INT_MAX));
+ current_index_ = static_cast<int>(offsets_.size() - 1);
+ UpdateOffsetFromIndex();
+ }
+
+ bool IsValid() const;
+
+ private:
+ ZoneVector<int> offsets_;
+ int current_index_;
+
+ void UpdateOffsetFromIndex();
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 28f997b534..dd91564b16 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -9,6 +9,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -163,6 +164,8 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfUndefined:
return Bytecode::kJumpIfUndefinedConstant;
+ case Bytecode::kJumpIfJSReceiver:
+ return Bytecode::kJumpIfJSReceiverConstant;
default:
UNREACHABLE();
return Bytecode::kIllegal;
@@ -290,7 +293,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
delta -= 1;
}
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
- node->set_bytecode(node->bytecode(), delta, node->operand(1));
+ node->update_operand0(delta);
} else {
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
@@ -308,13 +311,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
UNREACHABLE();
break;
case OperandSize::kByte:
- node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+ node->update_operand0(k8BitJumpPlaceholder);
break;
case OperandSize::kShort:
- node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+ node->update_operand0(k16BitJumpPlaceholder);
break;
case OperandSize::kQuad:
- node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+ node->update_operand0(k32BitJumpPlaceholder);
break;
}
}
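The forward-jump handling above emits a placeholder operand sized to the jump (8, 16, or 32 bits) and later patches it through update_operand0() once the label is bound. A self-contained sketch of the emit-placeholder-then-patch flow, with an invented single-byte jump encoding:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint8_t kJump = 0x01, kNop = 0x00, kPlaceholder = 0xFF;
  std::vector<uint8_t> code;

  // Forward reference: the target is unknown, so emit a placeholder operand.
  code.push_back(kJump);
  const size_t patch_at = code.size();
  code.push_back(kPlaceholder);

  code.push_back(kNop);  // ... more bytecode ...

  // "Bind the label": the target offset is now known; patch the operand with
  // the delta from the jump bytecode to the target.
  const int target = static_cast<int>(code.size());
  const int delta = target - static_cast<int>(patch_at - 1);
  code[patch_at] = static_cast<uint8_t>(delta);

  std::cout << "patched delta = " << delta << "\n";  // 3
  return 0;
}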
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 158af13ea7..31ac88c1f7 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-flags.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
namespace v8 {
@@ -25,10 +26,11 @@ uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
uint8_t result = FlagsBits::encode(runtime_flags);
if (fast_clone_supported) {
STATIC_ASSERT(
- FastCloneShallowObjectStub::kMaximumClonedProperties <=
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties <=
1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
- DCHECK_LE(properties_count,
- FastCloneShallowObjectStub::kMaximumClonedProperties);
+ DCHECK_LE(
+ properties_count,
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties);
result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
properties_count);
}
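The encoding above packs the runtime flags into the low bits of the result and, when fast cloning applies, the cloned-properties count into the bits above FastClonePropertiesCountBits::kShift; the STATIC_ASSERT guarantees the maximum count fits. A self-contained analogue with invented field widths (3 flag bits, 5 count bits):

#include <cstdint>
#include <iostream>

int main() {
  const unsigned kFlagsBits = 3;             // width of the low flag field
  const unsigned kMaxCount = (1u << 5) - 1;  // largest count that fits above

  const uint8_t runtime_flags = 0b101;
  const uint8_t properties_count = 6;

  uint8_t result = runtime_flags;  // FlagsBits::encode(...)
  if (properties_count <= kMaxCount) {
    result |= properties_count << kFlagsBits;  // CountBits::encode(...)
  }

  std::cout << "encoded = 0x" << std::hex << static_cast<int>(result)
            << "\n";  // 0x35
  return 0;
}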
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 99e76725d5..02f6c3bb2c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -6,6 +6,7 @@
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -496,24 +497,24 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
constant_pool_entry_(0),
has_constant_pool_entry_(false) {}
- void AddFunctionDeclaration(FeedbackVectorSlot slot, FunctionLiteral* func) {
+ void AddFunctionDeclaration(Handle<String> name, FeedbackVectorSlot slot,
+ FunctionLiteral* func) {
DCHECK(!slot.IsInvalid());
- declarations_.push_back(std::make_pair(slot, func));
+ declarations_.push_back(Declaration(name, slot, func));
}
- void AddUndefinedDeclaration(FeedbackVectorSlot slot) {
+ void AddUndefinedDeclaration(Handle<String> name, FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
- declarations_.push_back(std::make_pair(slot, nullptr));
+ declarations_.push_back(Declaration(name, slot, nullptr));
}
- Handle<FixedArray> AllocateDeclarationPairs(CompilationInfo* info) {
+ Handle<FixedArray> AllocateDeclarations(CompilationInfo* info) {
DCHECK(has_constant_pool_entry_);
int array_index = 0;
- Handle<FixedArray> pairs = info->isolate()->factory()->NewFixedArray(
- static_cast<int>(declarations_.size() * 2), TENURED);
- for (std::pair<FeedbackVectorSlot, FunctionLiteral*> declaration :
- declarations_) {
- FunctionLiteral* func = declaration.second;
+ Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
+ static_cast<int>(declarations_.size() * 3), TENURED);
+ for (const Declaration& declaration : declarations_) {
+ FunctionLiteral* func = declaration.func;
Handle<Object> initial_value;
if (func == nullptr) {
initial_value = info->isolate()->factory()->undefined_value();
@@ -526,10 +527,11 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
// will set stack overflow.
if (initial_value.is_null()) return Handle<FixedArray>();
- pairs->set(array_index++, Smi::FromInt(declaration.first.ToInt()));
- pairs->set(array_index++, *initial_value);
+ data->set(array_index++, *declaration.name);
+ data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
+ data->set(array_index++, *initial_value);
}
- return pairs;
+ return data;
}
size_t constant_pool_entry() {
@@ -547,7 +549,17 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
bool empty() { return declarations_.empty(); }
private:
- ZoneVector<std::pair<FeedbackVectorSlot, FunctionLiteral*>> declarations_;
+ struct Declaration {
+ Declaration() : slot(FeedbackVectorSlot::Invalid()), func(nullptr) {}
+ Declaration(Handle<String> name, FeedbackVectorSlot slot,
+ FunctionLiteral* func)
+ : name(name), slot(slot), func(func) {}
+
+ Handle<String> name;
+ FeedbackVectorSlot slot;
+ FunctionLiteral* func;
+ };
+ ZoneVector<Declaration> declarations_;
size_t constant_pool_entry_;
bool has_constant_pool_entry_;
};
@@ -565,6 +577,8 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
global_declarations_(0, info->zone()),
function_literals_(0, info->zone()),
native_function_literals_(0, info->zone()),
+ object_literals_(0, info->zone()),
+ array_literals_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
@@ -572,24 +586,23 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
generator_state_(),
loop_depth_(0),
home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
- empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
- AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
- const AstRawString* prototype_string = ast_value_factory->prototype_string();
- ast_value_factory->Internalize(info->isolate());
- prototype_string_ = prototype_string->string();
-}
+ iterator_symbol_(info->isolate()->factory()->iterator_symbol()),
+ prototype_string_(info->isolate()->factory()->prototype_string()),
+ empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()),
+ undefined_string_(
+ info->isolate()->ast_string_constants()->undefined_string()) {}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
- AllocateDeferredConstants();
+ AllocateDeferredConstants(isolate);
if (HasStackOverflow()) return Handle<BytecodeArray>();
return builder()->ToBytecodeArray(isolate);
}
-void BytecodeGenerator::AllocateDeferredConstants() {
+void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
// Build global declaration pair arrays.
for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
Handle<FixedArray> declarations =
- globals_builder->AllocateDeclarationPairs(info());
+ globals_builder->AllocateDeclarations(info());
if (declarations.is_null()) return SetStackOverflow();
builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
declarations);
@@ -614,6 +627,27 @@ void BytecodeGenerator::AllocateDeferredConstants() {
if (shared_info.is_null()) return SetStackOverflow();
builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
}
+
+ // Build object literal constant properties
+ for (std::pair<ObjectLiteral*, size_t> literal : object_literals_) {
+ ObjectLiteral* object_literal = literal.first;
+ if (object_literal->properties_count() > 0) {
+ // If constant properties is an empty fixed array, we've already added it
+ // to the constant pool when visiting the object literal.
+ Handle<FixedArray> constant_properties =
+ object_literal->GetOrBuildConstantProperties(isolate);
+
+ builder()->InsertConstantPoolEntryAt(literal.second, constant_properties);
+ }
+ }
+
+ // Build array literal constant elements
+ for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
+ ArrayLiteral* array_literal = literal.first;
+ Handle<ConstantElementsPair> constant_elements =
+ array_literal->GetOrBuildConstantElements(isolate);
+ builder()->InsertConstantPoolEntryAt(literal.second, constant_elements);
+ }
}
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
@@ -711,22 +745,25 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder) {
// Recall that stmt->yield_count() is always zero inside ordinary
// (i.e. non-generator) functions.
+ if (stmt->yield_count() == 0) {
+ loop_builder->LoopHeader();
+ } else {
+ // Collect all labels for generator resume points within the loop (if any)
+ // so that they can be bound to the loop header below. Also create fresh
+ // labels for these resume points, to be used inside the loop.
+ ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+ size_t first_yield = stmt->first_yield_id();
+ DCHECK_LE(first_yield + stmt->yield_count(),
+ generator_resume_points_.size());
+ for (size_t id = first_yield; id < first_yield + stmt->yield_count();
+ id++) {
+ auto& label = generator_resume_points_[id];
+ resume_points_in_loop.push_back(label);
+ generator_resume_points_[id] = BytecodeLabel();
+ }
+
+ loop_builder->LoopHeader(&resume_points_in_loop);
- // Collect all labels for generator resume points within the loop (if any) so
- // that they can be bound to the loop header below. Also create fresh labels
- // for these resume points, to be used inside the loop.
- ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
- size_t first_yield = stmt->first_yield_id();
- DCHECK_LE(first_yield + stmt->yield_count(), generator_resume_points_.size());
- for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
- auto& label = generator_resume_points_[id];
- resume_points_in_loop.push_back(label);
- generator_resume_points_[id] = BytecodeLabel();
- }
-
- loop_builder->LoopHeader(&resume_points_in_loop);
-
- if (stmt->yield_count() > 0) {
// If we are not resuming, fall through to loop body.
// If we are resuming, perform state dispatch.
BytecodeLabel not_resuming;
@@ -751,10 +788,13 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
->LoadAccumulatorWithRegister(generator_object)
.JumpIfUndefined(&regular_call);
- // This is a resume call. Restore registers and perform state dispatch.
- // (The current context has already been restored by the trampoline.)
+ // This is a resume call. Restore the current context and the registers, then
+ // perform state dispatch.
+ Register dummy = register_allocator()->NewRegister();
builder()
- ->ResumeGenerator(generator_object)
+ ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
+ .PushContext(dummy)
+ .ResumeGenerator(generator_object)
.StoreAccumulatorInRegister(generator_state_);
BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
generator_resume_points_);
@@ -795,7 +835,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddUndefinedDeclaration(slot);
+ globals_builder()->AddUndefinedDeclaration(variable->name(), slot);
break;
}
case VariableLocation::LOCAL:
@@ -849,7 +889,8 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddFunctionDeclaration(slot, decl->fun());
+ globals_builder()->AddFunctionDeclaration(variable->name(), slot,
+ decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -1300,7 +1341,7 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// If requested, clear message object as we enter the catch block.
if (stmt->clear_pending_message()) {
- builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage);
+ builder()->LoadTheHole().SetPendingMessage();
}
// Load the catch context into the accumulator.
@@ -1359,16 +1400,15 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Register message = context; // Reuse register.
// Clear message object as we enter the finally block.
- builder()
- ->CallRuntime(Runtime::kInterpreterClearPendingMessage)
- .StoreAccumulatorInRegister(message);
+ builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
+ message);
// Evaluate the finally-block.
Visit(stmt->finally_block());
try_control_builder.EndFinally();
// Pending message object is restored on exit.
- builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message);
+ builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
// Dynamic dispatch after the finally-block.
commands.ApplyDeferredCommands();
@@ -1383,25 +1423,39 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
scope()->is_function_scope());
size_t entry = builder()->AllocateConstantPoolEntry();
- builder()->CreateClosure(entry, flags);
+ int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+ builder()->CreateClosure(entry, slot_index, flags);
function_literals_.push_back(std::make_pair(expr, entry));
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
- VisitClassLiteralForRuntimeDefinition(expr);
+ Register constructor = VisitForRegisterValue(expr->constructor());
+ {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ VisitForAccumulatorValueOrTheHole(expr->extends());
+ builder()
+ ->StoreAccumulatorInRegister(args[0])
+ .MoveRegister(constructor, args[1])
+ .LoadLiteral(Smi::FromInt(expr->start_position()))
+ .StoreAccumulatorInRegister(args[2])
+ .LoadLiteral(Smi::FromInt(expr->end_position()))
+ .StoreAccumulatorInRegister(args[3])
+ .CallRuntime(Runtime::kDefineClass, args);
+ }
+ Register prototype = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(prototype);
- // Load the "prototype" from the constructor.
- RegisterList args = register_allocator()->NewRegisterList(2);
- Register literal = args[0];
- Register prototype = args[1];
- FeedbackVectorSlot slot = expr->PrototypeSlot();
- builder()
- ->StoreAccumulatorInRegister(literal)
- .LoadNamedProperty(literal, prototype_string(), feedback_index(slot))
- .StoreAccumulatorInRegister(prototype);
+ if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
+ // Prototype is already in the accumulator.
+ builder()->StoreNamedProperty(constructor, home_object_symbol(),
+ feedback_index(expr->HomeObjectSlot()),
+ language_mode());
+ }
- VisitClassLiteralProperties(expr, literal, prototype);
- builder()->CallRuntime(Runtime::kToFastProperties, literal);
+ VisitClassLiteralProperties(expr, constructor, prototype);
+ BuildClassLiteralNameProperty(expr, constructor);
+ builder()->CallRuntime(Runtime::kToFastProperties, constructor);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
VariableProxy* proxy = expr->class_variable_proxy();
@@ -1413,28 +1467,12 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
}
-void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
- ClassLiteral* expr) {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- VisitForAccumulatorValueOrTheHole(expr->extends());
- builder()->StoreAccumulatorInRegister(args[0]);
- VisitForRegisterValue(expr->constructor(), args[1]);
- builder()
- ->LoadLiteral(Smi::FromInt(expr->start_position()))
- .StoreAccumulatorInRegister(args[2])
- .LoadLiteral(Smi::FromInt(expr->end_position()))
- .StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kDefineClass, args);
-}
-
void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
- Register literal,
+ Register constructor,
Register prototype) {
RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(5);
- Register receiver = args[0], key = args[1], value = args[2], attr = args[3],
- set_function_name = args[4];
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ Register receiver = args[0], key = args[1], value = args[2], attr = args[3];
bool attr_assigned = false;
Register old_receiver = Register::invalid_value();
@@ -1444,14 +1482,18 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
ClassLiteral::Property* property = expr->properties()->at(i);
// Set up the receiver.
- Register new_receiver = property->is_static() ? literal : prototype;
+ Register new_receiver = property->is_static() ? constructor : prototype;
if (new_receiver != old_receiver) {
builder()->MoveRegister(new_receiver, receiver);
old_receiver = new_receiver;
}
- VisitForAccumulatorValue(property->key());
- builder()->ConvertAccumulatorToName(key);
+ if (property->key()->IsStringLiteral()) {
+ VisitForRegisterValue(property->key(), key);
+ } else {
+ VisitForAccumulatorValue(property->key());
+ builder()->ConvertAccumulatorToName(key);
+ }
if (property->is_static() && property->is_computed_name()) {
// The static prototype property is read-only. We handle the non-computed
@@ -1479,20 +1521,26 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
switch (property->kind()) {
case ClassLiteral::Property::METHOD: {
+ DataPropertyInLiteralFlags flags = DataPropertyInLiteralFlag::kDontEnum;
+ if (property->NeedsSetFunctionName()) {
+ flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+ }
+
+ FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ DCHECK(!slot.IsInvalid());
+
builder()
- ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(set_function_name)
- .CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+ ->LoadAccumulatorWithRegister(value)
+ .StoreDataPropertyInLiteral(receiver, key, flags,
+ feedback_index(slot));
break;
}
case ClassLiteral::Property::GETTER: {
- builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
- args.Truncate(4));
+ builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked, args);
break;
}
case ClassLiteral::Property::SETTER: {
- builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
- args.Truncate(4));
+ builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked, args);
break;
}
case ClassLiteral::Property::FIELD: {
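
The METHOD case above now packs per-property behavior into a single Flag8
operand instead of a dedicated set_function_name register. A sketch of that
flag encoding, reusing the names from the diff but with assumed bit values:

#include <cstdint>
#include <cstdio>

enum DataPropertyInLiteralFlag : uint8_t {
  kNoFlags = 0,
  kDontEnum = 1 << 0,         // Class methods are non-enumerable.
  kSetFunctionName = 1 << 1,  // Ask the runtime to name the closure.
};
using DataPropertyInLiteralFlags = uint8_t;

int main() {
  DataPropertyInLiteralFlags flags = kDontEnum;
  bool needs_set_function_name = true;  // property->NeedsSetFunctionName()
  if (needs_set_function_name) flags |= kSetFunctionName;
  // The whole set travels as one Flag8 bytecode operand.
  std::printf("Flag8 operand: 0x%02x\n", static_cast<unsigned>(flags));
  return 0;
}
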
@@ -1503,10 +1551,23 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
}
}
+void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
+ Register literal) {
+ if (!expr->has_name_static_property() &&
+ !expr->constructor()->raw_name()->IsEmpty()) {
+ Runtime::FunctionId runtime_id =
+ expr->has_static_computed_names()
+ ? Runtime::kInstallClassNameAccessorWithCheck
+ : Runtime::kInstallClassNameAccessor;
+ builder()->CallRuntime(runtime_id, literal);
+ }
+}
+
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateConstantPoolEntry();
- builder()->CreateClosure(entry, NOT_TENURED);
+ int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+ builder()->CreateClosure(entry, slot_index, NOT_TENURED);
native_function_literals_.push_back(std::make_pair(expr, entry));
}
@@ -1567,19 +1628,24 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- // Copy the literal boilerplate.
+ // Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
- FastCloneShallowObjectStub::IsSupported(expr),
- FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
+ expr->IsFastCloningSupported(),
+ ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+ expr->properties_count()),
expr->ComputeFlags());
+
+ Register literal = register_allocator()->NewRegister();
+ size_t entry;
// If constant properties is an empty fixed array, use our cached
// empty_fixed_array to ensure it's only added to the constant pool once.
- Handle<FixedArray> constant_properties = expr->properties_count() == 0
- ? empty_fixed_array()
- : expr->constant_properties();
- Register literal = register_allocator()->NewRegister();
- builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
- flags, literal);
+ if (expr->properties_count() == 0) {
+ entry = builder()->GetConstantPoolEntry(empty_fixed_array());
+ } else {
+ entry = builder()->AllocateConstantPoolEntry();
+ object_literals_.push_back(std::make_pair(expr, entry));
+ }
+ builder()->CreateObjectLiteral(entry, expr->literal_index(), flags, literal);
// Store computed values into the literal.
int property_index = 0;
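
The object-literal boilerplate now lives in the constant pool: an empty
property set reuses one cached entry, while non-empty literals get a
deferred entry that AllocateDeferredConstants fills in after visiting
finishes. A toy model of that dedup-plus-deferral scheme, with strings
standing in for heap objects:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct ConstantPool {
  std::vector<std::string> entries;
  std::map<std::string, size_t> cache;  // Dedup for shared singletons.

  size_t GetEntry(const std::string& singleton) {
    auto it = cache.find(singleton);
    if (it != cache.end()) return it->second;  // e.g. empty_fixed_array
    entries.push_back(singleton);
    return cache[singleton] = entries.size() - 1;
  }
  size_t AllocateDeferred() {  // Value is patched in later.
    entries.push_back("<unset>");
    return entries.size() - 1;
  }
};

int main() {
  ConstantPool pool;
  size_t a = pool.GetEntry("empty_fixed_array");
  size_t b = pool.GetEntry("empty_fixed_array");  // Same slot, added once.
  size_t c = pool.AllocateDeferred();             // Boilerplate comes later.
  std::printf("%zu %zu %zu\n", a, b, c);          // 0 0 1
  return 0;
}
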
@@ -1592,6 +1658,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterAllocationScope inner_register_scope(this);
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1700,18 +1767,26 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
- RegisterList args = register_allocator()->NewRegisterList(5);
- builder()->MoveRegister(literal, args[0]);
+ Register key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
- builder()->ConvertAccumulatorToName(args[1]);
- VisitForRegisterValue(property->value(), args[2]);
- VisitSetHomeObject(args[2], literal, property);
+ builder()->ConvertAccumulatorToName(key);
+
+ Register value = VisitForRegisterValue(property->value());
+ VisitSetHomeObject(value, literal, property);
+
+ DataPropertyInLiteralFlags data_property_flags =
+ DataPropertyInLiteralFlag::kNoFlags;
+ if (property->NeedsSetFunctionName()) {
+ data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+ }
+
+ FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ DCHECK(!slot.IsInvalid());
+
builder()
- ->LoadLiteral(Smi::FromInt(NONE))
- .StoreAccumulatorInRegister(args[3])
- .LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(args[4]);
- builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+ ->LoadAccumulatorWithRegister(value)
+ .StoreDataPropertyInLiteral(literal, key, data_property_flags,
+ feedback_index(slot));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1732,6 +1807,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->CallRuntime(function_id, args);
break;
}
+ case ObjectLiteral::Property::SPREAD: {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(property->value(), args[1]);
+ builder()->CallRuntime(Runtime::kCopyDataProperties, args);
+ break;
+ }
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE(); // Handled specially above.
break;
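
The new SPREAD case delegates to Runtime::kCopyDataProperties(literal,
source). For plain objects its observable effect is roughly to copy the
source's own enumerable data properties onto the target, as in this toy
sketch (maps standing in for JS objects):

#include <cstdio>
#include <map>
#include <string>

using JSObject = std::map<std::string, int>;

// Rough semantics of {...source} on a plain object.
void CopyDataProperties(JSObject& target, const JSObject& source) {
  for (const auto& entry : source) target[entry.first] = entry.second;
}

int main() {
  JSObject literal = {{"a", 1}};
  JSObject spread_source = {{"b", 2}, {"a", 3}};
  CopyDataProperties(literal, spread_source);
  std::printf("a=%d b=%d\n", literal["a"], literal["b"]);  // a=3 b=2
  return 0;
}
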
@@ -1743,14 +1825,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
- int runtime_flags = expr->ComputeFlags();
- bool use_fast_shallow_clone =
- (runtime_flags & ArrayLiteral::kShallowElements) != 0 &&
- expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
- uint8_t flags =
- CreateArrayLiteralFlags::Encode(use_fast_shallow_clone, runtime_flags);
- builder()->CreateArrayLiteral(expr->constant_elements(),
- expr->literal_index(), flags);
+ uint8_t flags = CreateArrayLiteralFlags::Encode(
+ expr->IsFastCloningSupported(), expr->ComputeFlags());
+
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateArrayLiteral(entry, expr->literal_index(), flags);
+ array_literals_.push_back(std::make_pair(expr, entry));
+
Register index, literal;
// Evaluate all the non-constant subexpressions and store them into the
@@ -1820,7 +1901,15 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
break;
}
case VariableLocation::UNALLOCATED: {
- builder()->LoadGlobal(feedback_index(slot), typeof_mode);
+ // The global identifier "undefined" is immutable. Everything
+ // else could be reassigned. For performance, we do a pointer comparison
+ // rather than checking if the raw_name is really "undefined".
+ if (variable->raw_name() == undefined_string()) {
+ builder()->LoadUndefined();
+ } else {
+ builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ typeof_mode);
+ }
break;
}
case VariableLocation::CONTEXT: {
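
The undefined_string() identity check above works because the parser
interns raw strings: equal names share one AstRawString, so a pointer
comparison substitutes for a character-by-character compare. A
self-contained illustration of that interning assumption:

#include <cassert>
#include <cstdio>
#include <string>
#include <unordered_set>

// Equal strings share one stored instance, so identity implies equality.
const std::string* Intern(std::unordered_set<std::string>& table,
                          const std::string& s) {
  return &*table.insert(s).first;
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* undefined_string = Intern(table, "undefined");
  const std::string* raw_name = Intern(table, "undefined");
  const std::string* other = Intern(table, "x");
  assert(raw_name == undefined_string);  // Pointer check only.
  assert(other != undefined_string);
  std::puts("interned identity check ok");
  return 0;
}
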
@@ -1920,25 +2009,19 @@ void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
builder()->Bind(&no_reference_error);
}
-void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
- // TODO(interpreter): Can the parser reduce the number of checks
- // performed? Or should there be a ThrowIfNotHole bytecode.
- BytecodeLabel no_reference_error, reference_error;
- builder()
- ->JumpIfNotHole(&reference_error)
- .Jump(&no_reference_error)
- .Bind(&reference_error);
- BuildThrowReferenceError(name);
- builder()->Bind(&no_reference_error);
-}
-
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
- BuildThrowIfNotHole(variable->name());
+ BytecodeLabel no_reference_error, reference_error;
+ builder()
+ ->JumpIfNotHole(&reference_error)
+ .Jump(&no_reference_error)
+ .Bind(&reference_error)
+ .CallRuntime(Runtime::kThrowSuperAlreadyCalledError)
+ .Bind(&no_reference_error);
} else {
// Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
@@ -2477,29 +2560,44 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
// Prepare the constructor to the super call.
- Register this_function = VisitForRegisterValue(super->this_function_var());
- builder()->CallRuntime(Runtime::kInlineGetSuperConstructor, this_function);
-
- Register constructor = this_function; // Re-use dead this_function register.
- builder()->StoreAccumulatorInRegister(constructor);
-
- RegisterList args = register_allocator()->NewGrowableRegisterList();
- VisitArguments(expr->arguments(), &args);
-
- // The new target is loaded into the accumulator from the
- // {new.target} variable.
- VisitForAccumulatorValue(super->new_target_var());
+ VisitForAccumulatorValue(super->this_function_var());
+ Register constructor = register_allocator()->NewRegister();
+ builder()->GetSuperConstructor(constructor);
+
+ ZoneList<Expression*>* args = expr->arguments();
+
+ // When a super call contains a spread, a CallSuper AST node is only created
+ // if there is exactly one spread, and it is the last argument.
+ if (!args->is_empty() && args->last()->IsSpread()) {
+ RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+ Register constructor_arg =
+ register_allocator()->GrowRegisterList(&args_regs);
+ builder()->MoveRegister(constructor, constructor_arg);
+ // Reserve argument reg for new.target in correct place for runtime call.
+ // TODO(petermarshall): Remove this when changing bytecode to use the new
+ // stub.
+ Register new_target = register_allocator()->GrowRegisterList(&args_regs);
+ VisitArguments(args, &args_regs);
+ VisitForRegisterValue(super->new_target_var(), new_target);
+ builder()->NewWithSpread(args_regs);
+ } else {
+ RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(args, &args_regs);
+ // The new target is loaded into the accumulator from the
+ // {new.target} variable.
+ VisitForAccumulatorValue(super->new_target_var());
- // Call construct.
- builder()->SetExpressionPosition(expr);
- // TODO(turbofan): For now we do gather feedback on super constructor
- // calls, utilizing the existing machinery to inline the actual call
- // target and the JSCreate for the implicit receiver allocation. This
- // is not an ideal solution for super constructor calls, but it gets
- // the job done for now. In the long run we might want to revisit this
- // and come up with a better way.
- int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- builder()->New(constructor, args, feedback_slot_index);
+ // Call construct.
+ builder()->SetExpressionPosition(expr);
+ // TODO(turbofan): For now we do gather feedback on super constructor
+ // calls, utilizing the existing machinery to inline the actual call
+ // target and the JSCreate for the implicit receiver allocation. This
+ // is not an ideal solution for super constructor calls, but it gets
+ // the job done for now. In the long run we might want to revisit this
+ // and come up with a better way.
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->New(constructor, args_regs, feedback_slot_index);
+ }
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
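
NewWithSpread consumes one contiguous register list, so VisitCallSuper
above has to grow it in exactly the layout the runtime call expects:
constructor first, then the reserved new.target slot, then the arguments
with the spread last. Sketched layout (register names invented):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Growable register list in the order built above; contiguity is what
  // lets it travel as a single kRegList/kRegCount operand pair.
  std::vector<std::string> regs;
  regs.push_back("constructor");  // MoveRegister(constructor, ...)
  regs.push_back("new.target");   // Reserved before visiting arguments.
  regs.push_back("arg0");
  regs.push_back("...spread");    // Exactly one spread, last by construction.
  for (size_t i = 0; i < regs.size(); ++i)
    std::printf("r%zu = %s\n", i, regs[i].c_str());
  return 0;
}
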
@@ -2800,15 +2898,43 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+ builder()->SetExpressionPosition(expr);
builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
}
-void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void BytecodeGenerator::VisitSpread(Spread* expr) { Visit(expr->expression()); }
void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
+ FeedbackVectorSlot load_slot = expr->IteratorPropertyFeedbackSlot();
+ FeedbackVectorSlot call_slot = expr->IteratorCallFeedbackSlot();
+
+ RegisterList args = register_allocator()->NewRegisterList(1);
+ Register method = register_allocator()->NewRegister();
+ Register obj = args[0];
+
+ VisitForAccumulatorValue(expr->iterable());
+
+ // Let method be GetMethod(obj, @@iterator).
+ builder()
+ ->StoreAccumulatorInRegister(obj)
+ .LoadNamedProperty(obj, iterator_symbol(), feedback_index(load_slot))
+ .StoreAccumulatorInRegister(method);
+
+ // Let iterator be Call(method, obj).
+ builder()->Call(method, args, feedback_index(call_slot),
+ Call::NAMED_PROPERTY_CALL);
+
+ // If Type(iterator) is not Object, throw a TypeError exception.
+ BytecodeLabel no_type_error;
+ builder()->JumpIfJSReceiver(&no_type_error);
+ builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
+ builder()->Bind(&no_type_error);
+}
+
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
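
VisitGetIterator above follows the spec steps quoted in its comments. A
toy model of those three steps with a minimal value type (nothing like
V8's real object model):

#include <cstdio>
#include <stdexcept>

struct Value;
using Method = Value (*)(const Value&);

struct Value {
  bool is_object = false;
  Method iterator_method = nullptr;  // Stands in for obj[@@iterator].
};

Value GetIterator(const Value& iterable) {
  Method method = iterable.iterator_method;  // GetMethod(obj, @@iterator)
  if (method == nullptr) throw std::runtime_error("TypeError: not iterable");
  Value iterator = method(iterable);         // Call(method, obj)
  if (!iterator.is_object)                   // Must yield an Object.
    throw std::runtime_error("TypeError: Symbol.iterator invalid");
  return iterator;
}

Value MakeIterator(const Value&) { return Value{true, nullptr}; }

int main() {
  Value obj{true, &MakeIterator};
  GetIterator(obj);
  std::puts("iterator acquired");
  return 0;
}
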
@@ -2930,12 +3056,27 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
.StoreAccumulatorInRegister(args[2])
.CallRuntime(Runtime::kPushModuleContext, args);
} else {
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
- builder()->CreateFunctionContext(slot_count);
+ if (slot_count <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ switch (scope->scope_type()) {
+ case EVAL_SCOPE:
+ builder()->CreateEvalContext(slot_count);
+ break;
+ case FUNCTION_SCOPE:
+ builder()->CreateFunctionContext(slot_count);
+ break;
+ default:
+ UNREACHABLE();
+ }
} else {
- builder()->CallRuntime(Runtime::kNewFunctionContext,
- Register::function_closure());
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(Register::function_closure(), args[0])
+ .LoadLiteral(Smi::FromInt(scope->scope_type()))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewFunctionContext, args);
}
}
}
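
Context allocation now dispatches on both slot count and scope type: small
contexts get a dedicated bytecode per scope kind, large ones fall back to
the runtime with the scope type passed along. A sketch of that dispatch;
kMaxFastSlots is an assumed stand-in for
ConstructorBuiltinsAssembler::MaximumFunctionContextSlots():

#include <cstdio>

enum ScopeType { EVAL_SCOPE, FUNCTION_SCOPE };
constexpr int kMaxFastSlots = 55;  // Assumed threshold, not V8's value.

void AllocateContext(ScopeType type, int slot_count) {
  if (slot_count <= kMaxFastSlots) {
    // Fast path: one specialized bytecode per scope kind.
    std::printf(type == EVAL_SCOPE ? "CreateEvalContext %d\n"
                                   : "CreateFunctionContext %d\n",
                slot_count);
  } else {
    // Slow path: generic runtime call carrying the scope type.
    std::printf("CallRuntime kNewFunctionContext closure, scope_type=%d\n",
                static_cast<int>(type));
  }
}

int main() {
  AllocateContext(FUNCTION_SCOPE, 4);
  AllocateContext(EVAL_SCOPE, 200);
  return 0;
}
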
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index bcab9975d0..f15829dea8 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -53,7 +53,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TestFallthrough { kThen, kElse, kNone };
void GenerateBytecodeBody();
- void AllocateDeferredConstants();
+ void AllocateDeferredConstants(Isolate* isolate);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -109,7 +109,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildReThrow();
void BuildAbort(BailoutReason bailout_reason);
void BuildThrowIfHole(Handle<String> name);
- void BuildThrowIfNotHole(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
@@ -129,9 +128,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
- void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+ void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
Register prototype);
+ void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
void VisitBlockDeclarationsAndStatements(Block* stmt);
@@ -196,8 +195,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
int feedback_index(FeedbackVectorSlot slot) const;
Handle<Name> home_object_symbol() const { return home_object_symbol_; }
+ Handle<Name> iterator_symbol() const { return iterator_symbol_; }
Handle<Name> prototype_string() const { return prototype_string_; }
Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
+ const AstRawString* undefined_string() const { return undefined_string_; }
Zone* zone_;
BytecodeArrayBuilder* builder_;
@@ -209,6 +210,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
native_function_literals_;
+ ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
+ ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
ControlScope* execution_control_;
ContextScope* execution_context_;
@@ -219,8 +222,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
int loop_depth_;
Handle<Name> home_object_symbol_;
+ Handle<Name> iterator_symbol_;
Handle<Name> prototype_string_;
Handle<FixedArray> empty_fixed_array_;
+ const AstRawString* undefined_string_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
index a12e8ab4cc..ef32bdd104 100644
--- a/deps/v8/src/interpreter/bytecode-label.cc
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index b5f602d216..4ef6265eb2 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -17,7 +17,7 @@ class BytecodeArrayBuilder;
// label is bound, it represents a known position in the bytecode
// array. For labels that are forward references there can be at most
// one reference whilst it is unbound.
-class BytecodeLabel final {
+class V8_EXPORT_PRIVATE BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
@@ -54,7 +54,7 @@ class BytecodeLabel final {
};
// Class representing a branch target of multiple jumps.
-class BytecodeLabels {
+class V8_EXPORT_PRIVATE BytecodeLabels {
public:
explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 55485027d3..f649d93a08 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -23,27 +23,33 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
- V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
+#define SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte)
+
+#define UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
- V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+ V(RegCount, OperandTypeInfo::kScalableUnsignedByte)
+
+#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
+ V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
-#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- V(Imm, OperandTypeInfo::kScalableSignedByte)
+// Carefully ordered for operand type range checks below.
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ INVALID_OPERAND_TYPE_LIST(V) \
+ UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
+ UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+ SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V)
+// Carefully ordered for operand type range checks below.
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
-#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
- INVALID_OPERAND_TYPE_LIST(V) \
- UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
-
// The list of operand types used by bytecodes.
+// Carefully ordered for operand type range checks below.
#define OPERAND_TYPE_LIST(V) \
NON_REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_OPERAND_TYPE_LIST(V)
@@ -125,6 +131,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+class BytecodeOperands {
+ public:
+ // Returns true if |accumulator_use| reads the accumulator.
+ static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
+ return accumulator_use == AccumulatorUse::kRead ||
+ accumulator_use == AccumulatorUse::kReadWrite;
+ }
+
+ // Returns true if |accumulator_use| writes the accumulator.
+ static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
+ return accumulator_use == AccumulatorUse::kWrite ||
+ accumulator_use == AccumulatorUse::kReadWrite;
+ }
+
+ // Returns true if |operand_type| is a scalable signed byte.
+ static constexpr bool IsScalableSignedByte(OperandType operand_type) {
+ return operand_type >= OperandType::kImm &&
+ operand_type <= OperandType::kRegOutTriple;
+ }
+
+ // Returns true if |operand_type| is a scalable unsigned byte.
+ static constexpr bool IsScalableUnsignedByte(OperandType operand_type) {
+ return operand_type >= OperandType::kIdx &&
+ operand_type <= OperandType::kRegCount;
+ }
+};
+
} // namespace interpreter
} // namespace internal
} // namespace v8
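
The "carefully ordered" lists above exist so BytecodeOperands can answer
set-membership questions with two integer comparisons. An illustration of
the trick with an assumed (non-V8) enum layout:

#include <cassert>

enum class OperandType {
  kNone,
  kFlag8, kIntrinsicId, kRuntimeId,                      // unsigned fixed
  kIdx, kUImm, kRegCount,                                // unsigned scalable
  kImm, kReg, kRegList, kRegPair, kRegOut, kRegOutPair,  // signed scalable
  kRegOutTriple,
};

constexpr bool IsScalableUnsignedByte(OperandType t) {
  return t >= OperandType::kIdx && t <= OperandType::kRegCount;
}
constexpr bool IsScalableSignedByte(OperandType t) {
  return t >= OperandType::kImm && t <= OperandType::kRegOutTriple;
}

int main() {
  static_assert(IsScalableUnsignedByte(OperandType::kUImm), "range check");
  static_assert(!IsScalableUnsignedByte(OperandType::kFlag8), "range check");
  assert(IsScalableSignedByte(OperandType::kRegPair));
  return 0;
}
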
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
index 40552943f7..acfe484ad3 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,8 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
- : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
+ : next_stage_(next_stage),
+ last_(BytecodeNode::Illegal(BytecodeSourceInfo())) {
InvalidateLast();
}
@@ -65,7 +66,7 @@ void BytecodePeepholeOptimizer::Flush() {
}
void BytecodePeepholeOptimizer::InvalidateLast() {
- last_.set_bytecode(Bytecode::kIllegal);
+ last_ = BytecodeNode::Illegal(BytecodeSourceInfo());
}
bool BytecodePeepholeOptimizer::LastIsValid() const {
@@ -116,26 +117,42 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace {
-void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
- BytecodeNode* const last,
- BytecodeNode* const current) {
+BytecodeNode TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+ Bytecode new_bytecode, BytecodeNode* const last,
+ BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
- current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
- current->operand(1));
+ BytecodeNode node(new_bytecode, last->operand(0), current->operand(0),
+ current->operand(1), current->source_info());
if (last->source_info().is_valid()) {
- current->set_source_info(last->source_info());
+ node.set_source_info(last->source_info());
}
+ return node;
}
-void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
- BytecodeNode* const last,
- BytecodeNode* const current) {
+BytecodeNode TransformLdaZeroBinaryOpToBinaryOpWithZero(
+ Bytecode new_bytecode, BytecodeNode* const last,
+ BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
- current->set_bytecode(new_bytecode, 0, current->operand(0),
- current->operand(1));
+ BytecodeNode node(new_bytecode, 0, current->operand(0), current->operand(1),
+ current->source_info());
if (last->source_info().is_valid()) {
- current->set_source_info(last->source_info());
+ node.set_source_info(last->source_info());
}
+ return node;
+}
+
+BytecodeNode TransformEqualityWithNullOrUndefined(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK((last->bytecode() == Bytecode::kLdaNull) ||
+ (last->bytecode() == Bytecode::kLdaUndefined));
+ DCHECK((current->bytecode() == Bytecode::kTestEqual) ||
+ (current->bytecode() == Bytecode::kTestEqualStrict));
+ BytecodeNode node(new_bytecode, current->operand(0), current->source_info());
+ if (last->source_info().is_valid()) {
+ node.set_source_info(last->source_info());
+ }
+ return node;
}
} // namespace
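
The new transform fuses LdaNull/LdaUndefined followed by TestEqual or
TestEqualStrict into a single TestNull/TestUndefined/TestUndetectable
node, keeping the earlier node's source position when it is valid. A
stripped-down model of that two-slot window (the dispatch table that
chooses new_bytecode is elided):

#include <cstdio>

enum class Bc { LdaUndefined, LdaNull, TestEqual, TestUndefined, TestNull };

struct Node {
  Bc bytecode;
  int source_position;  // -1 means "no source info".
};

Node Fuse(Bc new_bytecode, const Node& last, const Node& current) {
  Node node{new_bytecode, current.source_position};
  if (last.source_position != -1)  // Prefer the earlier valid position.
    node.source_position = last.source_position;
  return node;
}

int main() {
  Node last{Bc::LdaUndefined, 10};
  Node current{Bc::TestEqual, -1};
  Node fused = Fuse(Bc::TestUndefined, last, current);
  std::printf("fused bytecode=%d pos=%d\n",
              static_cast<int>(fused.bytecode), fused.source_position);
  return 0;
}
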
@@ -175,8 +192,8 @@ void BytecodePeepholeOptimizer::ElideCurrentAction(
if (node->source_info().is_valid()) {
// Preserve the source information by replacing the node bytecode
// with a no-op bytecode.
- node->set_bytecode(Bytecode::kNop);
- DefaultAction(node);
+ BytecodeNode new_node(BytecodeNode::Nop(node->source_info()));
+ DefaultAction(&new_node);
} else {
// Nothing to do, keep last and wait for next bytecode to pair with it.
}
@@ -228,9 +245,9 @@ void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
- TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
- node);
- SetLast(node);
+ BytecodeNode new_node(TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
} else {
DefaultAction(node);
}
@@ -243,14 +260,24 @@ void BytecodePeepholeOptimizer::
DCHECK(!Bytecodes::IsJump(node->bytecode()));
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
- TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
- node);
- SetLast(node);
+ BytecodeNode new_node(TransformLdaZeroBinaryOpToBinaryOpWithZero(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
} else {
DefaultAction(node);
}
}
+void BytecodePeepholeOptimizer::TransformEqualityWithNullOrUndefinedAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+ // Fused last and current into current.
+ BytecodeNode new_node(TransformEqualityWithNullOrUndefined(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
+}
+
void BytecodePeepholeOptimizer::DefaultJumpAction(
BytecodeNode* const node, const PeepholeActionAndData* action_data) {
DCHECK(LastIsValid());
@@ -273,7 +300,7 @@ void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
next_stage()->Write(last());
InvalidateLast();
- node->set_bytecode(action_data->bytecode, node->operand(0));
+ node->replace_bytecode(action_data->bytecode);
}
void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
diff --git a/deps/v8/src/interpreter/bytecode-peephole-table.h b/deps/v8/src/interpreter/bytecode-peephole-table.h
index 1790f5a109..fe46979fd9 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-table.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-table.h
@@ -11,16 +11,17 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
- V(DefaultAction) \
- V(UpdateLastAction) \
- V(UpdateLastIfSourceInfoPresentAction) \
- V(ElideCurrentAction) \
- V(ElideCurrentIfOperand0MatchesAction) \
- V(ElideLastAction) \
- V(ChangeBytecodeAction) \
- V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
- V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
+#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ V(DefaultAction) \
+ V(UpdateLastAction) \
+ V(UpdateLastIfSourceInfoPresentAction) \
+ V(ElideCurrentAction) \
+ V(ElideCurrentIfOperand0MatchesAction) \
+ V(ElideLastAction) \
+ V(ChangeBytecodeAction) \
+ V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
+ V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction) \
+ V(TransformEqualityWithNullOrUndefinedAction)
#define PEEPHOLE_JUMP_ACTION_LIST(V) \
V(DefaultJumpAction) \
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
index d508defea0..03d40f7344 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -191,6 +191,15 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(3, operand3);
}
+#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
+ template <typename... Operands> \
+ INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
+ Operands... operands)) { \
+ return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
+ }
+ BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
+#undef DEFINE_BYTECODE_NODE_CREATOR
+
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
@@ -198,40 +207,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bytecode_ = bytecode;
}
- void set_bytecode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- bytecode_ = bytecode;
- operand_count_ = 0;
- operand_scale_ = OperandScale::kSingle;
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
- bytecode_ = bytecode;
- operand_count_ = 1;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
- bytecode_ = bytecode;
- operand_count_ = 2;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- SetOperand(1, operand1);
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
- bytecode_ = bytecode;
- operand_count_ = 3;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- SetOperand(1, operand1);
- SetOperand(2, operand2);
- }
+ void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
// Print to stream |os|.
void Print(std::ostream& os) const;
@@ -277,6 +253,100 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
+ template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
+ friend class BytecodeNodeBuilder;
+
+ INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
+ OperandScale operand_scale,
+ BytecodeSourceInfo source_info, uint32_t operand0 = 0,
+ uint32_t operand1 = 0, uint32_t operand2 = 0,
+ uint32_t operand3 = 0))
+ : bytecode_(bytecode),
+ operand_count_(operand_count),
+ operand_scale_(operand_scale),
+ source_info_(source_info) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ operands_[3] = operand3;
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
+ return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ return BytecodeNode(bytecode, 1, scale, source_info, operand0);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type,
+ OperandType operand2_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+ return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
+ operand2);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type,
+ OperandType operand2_type, OperandType operand3_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+ scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
+ return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
+ operand2, operand3);
+ }
+
+ template <OperandType operand_type>
+ INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
+ if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
+ return Bytecodes::ScaleForUnsignedOperand(operand);
+ } else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
+ return Bytecodes::ScaleForSignedOperand(operand);
+ } else {
+ return OperandScale::kSingle;
+ }
+ }
+
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
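
Each Create<> overload above widens the node's OperandScale until every
operand fits. A compact model of the widening rule, assuming the usual
byte/short/word progression of ScaleForUnsignedOperand:

#include <algorithm>
#include <cstdint>
#include <cstdio>

enum class OperandScale : int { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Smallest scale whose scaled byte width holds the value.
OperandScale ScaleForUnsignedOperand(uint32_t value) {
  if (value <= 0xFF) return OperandScale::kSingle;
  if (value <= 0xFFFF) return OperandScale::kDouble;
  return OperandScale::kQuadruple;
}

int main() {
  // A node's scale is the max over its operands, as in the Create<> helpers.
  const uint32_t operands[] = {3, 70000, 200};
  OperandScale scale = OperandScale::kSingle;
  for (uint32_t operand : operands)
    scale = std::max(scale, ScaleForUnsignedOperand(operand));
  std::printf("scale = x%d\n", static_cast<int>(scale));  // x4
  return 0;
}
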
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 563956e5c6..e1e38a6d16 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -265,16 +265,16 @@ void BytecodeRegisterOptimizer::OutputRegisterTransfer(
if (input == accumulator_) {
uint32_t operand = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kStar, operand, source_info);
+ BytecodeNode node = BytecodeNode::Star(source_info, operand);
next_stage_->Write(&node);
} else if (output == accumulator_) {
uint32_t operand = static_cast<uint32_t>(input.ToOperand());
- BytecodeNode node(Bytecode::kLdar, operand, source_info);
+ BytecodeNode node = BytecodeNode::Ldar(source_info, operand);
next_stage_->Write(&node);
} else {
uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
+ BytecodeNode node = BytecodeNode::Mov(source_info, operand0, operand1);
next_stage_->Write(&node);
}
if (output != accumulator_) {
@@ -365,7 +365,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
BytecodeSourceInfo source_info) const {
DCHECK(source_info.is_valid());
- BytecodeNode nop(Bytecode::kNop, source_info);
+ BytecodeNode nop = BytecodeNode::Nop(source_info);
next_stage_->Write(&nop);
}
@@ -416,32 +416,6 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
}
}
-void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
- if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
- bytecode == Bytecode::kSuspendGenerator) {
- // All state must be flushed before emitting
- // - a jump bytecode (as the register equivalents at the jump target aren't
- // known.
- // - a call to the debugger (as it can manipulate locals and parameters),
- // - a generator suspend (as this involves saving all registers).
- Flush();
- }
-
- // Materialize the accumulator if it is read by the bytecode. The
- // accumulator is special and no other register can be materialized
- // in it's place.
- if (Bytecodes::ReadsAccumulator(bytecode) &&
- !accumulator_info_->materialized()) {
- Materialize(accumulator_info_);
- }
-
- // Materialize an equivalent to the accumulator if it will be
- // clobbered when the bytecode is dispatched.
- if (Bytecodes::WritesAccumulator(bytecode)) {
- PrepareOutputRegister(accumulator_);
- }
-}
-
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index e2a02cf594..80c2f2587f 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -46,7 +46,32 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void Flush();
// Prepares for |bytecode|.
- void PrepareForBytecode(Bytecode bytecode);
+ template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ INLINE(void PrepareForBytecode()) {
+ if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator) {
+ // All state must be flushed before emitting
+ // - a jump bytecode (as the register equivalents at the jump target
+ //   aren't known),
+ // - a call to the debugger (as it can manipulate locals and parameters),
+ // - a generator suspend (as this involves saving all registers).
+ Flush();
+ }
+
+ // Materialize the accumulator if it is read by the bytecode. The
+ // accumulator is special and no other register can be materialized
+ // in its place.
+ if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
+ Materialize(accumulator_info_);
+ }
+
+ // Materialize an equivalent to the accumulator if it will be
+ // clobbered when the bytecode is dispatched.
+ if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
+ PrepareOutputRegister(accumulator_);
+ }
+ }
// Prepares |reg| for being used as an output operand.
void PrepareOutputRegister(Register reg);
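
Turning bytecode and accumulator_use into template parameters makes every
predicate in PrepareForBytecode a compile-time constant, so each
instantiation keeps only the branches it needs. A minimal demonstration of
that folding:

#include <cstdio>

enum class AccumulatorUse { kNone, kRead, kWrite, kReadWrite };

constexpr bool ReadsAccumulator(AccumulatorUse use) {
  return use == AccumulatorUse::kRead || use == AccumulatorUse::kReadWrite;
}
constexpr bool WritesAccumulator(AccumulatorUse use) {
  return use == AccumulatorUse::kWrite || use == AccumulatorUse::kReadWrite;
}

// |use| is a compile-time constant, so these branches fold away per
// instantiation, mirroring the templated PrepareForBytecode above.
template <AccumulatorUse use>
inline void Prepare() {
  if (ReadsAccumulator(use)) std::puts("materialize accumulator");
  if (WritesAccumulator(use)) std::puts("prepare accumulator output");
}

int main() {
  Prepare<AccumulatorUse::kRead>();       // Only the materialize branch.
  Prepare<AccumulatorUse::kReadWrite>();  // Both branches.
  return 0;
}
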
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 23d77f0c33..f09af85be4 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -38,8 +38,9 @@ namespace interpreter {
V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
\
/* Globals */ \
- V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
+ V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx) \
V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
OperandType::kIdx) \
V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
@@ -97,6 +98,8 @@ namespace interpreter {
OperandType::kReg, OperandType::kIdx) \
V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
+ V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
\
/* Binary Operators */ \
V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
@@ -140,6 +143,9 @@ namespace interpreter {
V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
+ /* GetSuperConstructor operator */ \
+ V(GetSuperConstructor, AccumulatorUse::kRead, OperandType::kRegOut) \
+ \
/* Call operations */ \
V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
@@ -158,9 +164,11 @@ namespace interpreter {
V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
OperandType::kRegList, OperandType::kRegCount) \
\
- /* New operator */ \
+ /* New operators */ \
V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
+ V(NewWithSpread, AccumulatorUse::kWrite, OperandType::kRegList, \
+ OperandType::kRegCount) \
\
/* Test Operators */ \
V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
@@ -180,6 +188,11 @@ namespace interpreter {
V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
+ /* TestEqual with Null or Undefined */ \
+ V(TestUndetectable, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(TestNull, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(TestUndefined, AccumulatorUse::kWrite, OperandType::kReg) \
+ \
/* Cast operators */ \
V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut) \
@@ -195,13 +208,14 @@ namespace interpreter {
\
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kFlag8) \
+ OperandType::kIdx, OperandType::kFlag8) \
\
/* Context allocation */ \
V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kUImm) \
+ V(CreateEvalContext, AccumulatorUse::kWrite, OperandType::kUImm) \
V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
\
@@ -210,24 +224,35 @@ namespace interpreter {
V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
V(CreateRestParameter, AccumulatorUse::kWrite) \
\
- /* Control Flow */ \
+ /* Control Flow -- carefully ordered for efficient checks */ \
+ /* - [Unconditional jumps] */ \
+ V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
+ /* - [Forward jumps] */ \
V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
+ /* - [Start constant jumps] */ \
V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
- V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ /* - [Conditional jumps] */ \
+ /* - [Conditional constant jumps] */ \
+ V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ /* - [Start ToBoolean jumps] */ \
V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ /* - [End constant jumps] */ \
+ /* - [Conditional immediate jumps] */ \
+ V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ /* - [End ToBoolean jumps] */ \
+ V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
\
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
@@ -241,6 +266,9 @@ namespace interpreter {
/* Perform a stack guard check */ \
V(StackCheck, AccumulatorUse::kNone) \
\
+ /* Update the pending message */ \
+ V(SetPendingMessage, AccumulatorUse::kReadWrite) \
+ \
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
@@ -294,6 +322,69 @@ namespace interpreter {
DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
+// Lists of jump bytecodes.
+
+#define JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpLoop) \
+ V(Jump)
+
+#define JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) V(JumpConstant)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanFalse)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfToBooleanFalseConstant)
+
+#define JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpIfTrue) \
+ V(JumpIfFalse) \
+ V(JumpIfNull) \
+ V(JumpIfUndefined) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfNotHole)
+
+#define JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ V(JumpIfNullConstant) \
+ V(JumpIfUndefinedConstant) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotHoleConstant)
+
+#define JUMP_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V)
+
+#define JUMP_TO_BOOLEAN_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_UNCONDITIONAL_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_CONDITIONAL_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_FORWARD_BYTECODE_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ JUMP_CONDITIONAL_BYTECODE_LIST(V)
+
+#define JUMP_BYTECODE_LIST(V) \
+ JUMP_FORWARD_BYTECODE_LIST(V) \
+ V(JumpLoop)
+
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -306,14 +397,6 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#if V8_CC_MSVC
-#define CONSTEXPR const
-#else
-#define CONSTEXPR constexpr
-#endif
-
class V8_EXPORT_PRIVATE Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
@@ -381,14 +464,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns true if |bytecode| reads the accumulator.
static bool ReadsAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
- AccumulatorUse::kRead;
+ return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
}
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
- AccumulatorUse::kWrite;
+ return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
}
// Return true if |bytecode| writes the accumulator with a boolean value.
@@ -407,7 +488,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
case Bytecode::kTestGreaterThanOrEqual:
case Bytecode::kTestInstanceOf:
case Bytecode::kTestIn:
+ case Bytecode::kTestUndetectable:
case Bytecode::kForInContinue:
+ case Bytecode::kTestUndefined:
+ case Bytecode::kTestNull:
return true;
default:
return false;
@@ -416,7 +500,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
- static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
@@ -429,123 +513,124 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star.
- static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
}
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrue ||
- bytecode == Bytecode::kJumpIfFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfNotHole ||
- bytecode == Bytecode::kJumpIfNull ||
- bytecode == Bytecode::kJumpIfUndefined;
+ static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
+ bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx).
- static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrueConstant ||
- bytecode == Bytecode::kJumpIfFalseConstant ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
- bytecode == Bytecode::kJumpIfNotHoleConstant ||
- bytecode == Bytecode::kJumpIfNullConstant ||
- bytecode == Bytecode::kJumpIfUndefinedConstant;
+ static constexpr bool IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfNullConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a conditional jump taking
// any kind of operand.
- static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
- return IsConditionalJumpImmediate(bytecode) ||
- IsConditionalJumpConstant(bytecode);
+ static constexpr bool IsConditionalJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfNullConstant &&
+ bytecode <= Bytecode::kJumpIfNotHole;
+ }
+
+ // Returns true if the bytecode is an unconditional jump.
+ static constexpr bool IsUnconditionalJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpLoop &&
+ bytecode <= Bytecode::kJumpConstant;
}
// Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
+ static constexpr bool IsJumpImmediate(Bytecode bytecode) {
return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
IsConditionalJumpImmediate(bytecode);
}
// Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx).
- static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstant ||
- IsConditionalJumpConstant(bytecode);
+ static constexpr bool IsJumpConstant(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a jump that internally coerces the
// accumulator to a boolean.
- static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+ static constexpr bool IsJumpIfToBoolean(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalse;
}
// Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand.
- static CONSTEXPR bool IsJump(Bytecode bytecode) {
- return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
+ static constexpr bool IsJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpLoop &&
+ bytecode <= Bytecode::kJumpIfNotHole;
+ }
+
+ // Returns true if the bytecode is a forward jump or conditional jump taking
+ // any kind of operand.
+ static constexpr bool IsForwardJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump, a jump, or a return.
- static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
+ static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
// Return true if |bytecode| is a jump without effects,
// e.g. any jump excluding those that include type coercion like
// JumpIfToBooleanTrue.
- static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
}
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw.
- static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
+ static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
}
// Returns true if the bytecode is Ldar or Star.
- static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
+ static constexpr bool IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
}
// Returns true if |bytecode| puts a name in the accumulator.
- static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
+ static constexpr bool PutsNameInAccumulator(Bytecode bytecode) {
return bytecode == Bytecode::kTypeOf;
}
// Returns true if the bytecode is a call or a constructor call.
- static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
+ static constexpr bool IsCallOrNew(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
}
// Returns true if the bytecode is a call to the runtime.
- static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
+ static constexpr bool IsCallRuntime(Bytecode bytecode) {
return bytecode == Bytecode::kCallRuntime ||
bytecode == Bytecode::kCallRuntimeForPair ||
bytecode == Bytecode::kInvokeIntrinsic;
}
// Returns true if the bytecode is a scaling prefix bytecode.
- static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
+ static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
bytecode == Bytecode::kDebugBreakExtraWide ||
bytecode == Bytecode::kDebugBreakWide;
}
// Returns the number of values which |bytecode| returns.
- static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
+ static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
}
@@ -730,10 +815,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const OperandSize* const kOperandSizes[][3];
};
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#undef CONSTEXPR
-
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const Bytecode& bytecode);
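The rewritten predicates above depend on the jump bytecodes being contiguous in the Bytecode enum: unconditional jumps first, then the constant-operand conditional jumps, then the immediate-operand ones. Each family test then collapses from a chain of equality comparisons into a single pair of range comparisons. A minimal standalone sketch of the pattern follows; the enum layout here is illustrative, not V8's actual ordering.

#include <cassert>

// Illustrative layout: unconditional jumps, then constant-operand conditional
// jumps, then immediate-operand conditional jumps, then everything else.
enum class Bytecode {
  kJumpLoop, kJump, kJumpConstant,
  kJumpIfNullConstant, kJumpIfTrueConstant, kJumpIfToBooleanFalseConstant,
  kJumpIfToBooleanTrue, kJumpIfToBooleanFalse, kJumpIfNotHole,
  kReturn, kLdar,
};

static constexpr bool IsUnconditionalJump(Bytecode b) {
  return b >= Bytecode::kJumpLoop && b <= Bytecode::kJumpConstant;
}
static constexpr bool IsConditionalJump(Bytecode b) {
  return b >= Bytecode::kJumpIfNullConstant && b <= Bytecode::kJumpIfNotHole;
}
static constexpr bool IsJump(Bytecode b) {
  return b >= Bytecode::kJumpLoop && b <= Bytecode::kJumpIfNotHole;
}

int main() {
  // The ranges compose: every jump is either conditional or unconditional.
  static_assert(IsJump(Bytecode::kJumpIfNotHole), "end of jump range");
  static_assert(!IsJump(Bytecode::kReturn), "past the jump range");
  assert(IsConditionalJump(Bytecode::kJumpIfTrueConstant));
  assert(IsUnconditionalJump(Bytecode::kJump));
  return 0;
}

The trade-off is that the enum order becomes load-bearing: reordering the bytecode list silently changes what these predicates accept, which is why pinning the range boundaries with static_assert is worthwhile.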
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index d2b7995623..6fd141e911 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -56,14 +56,20 @@ void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
constants_[index - start_index()] = object;
}
-bool ConstantArrayBuilder::ConstantArraySlice::AllElementsAreUnique() const {
+#if DEBUG
+void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique()
+ const {
std::set<Object*> elements;
for (auto constant : constants_) {
- if (elements.find(*constant) != elements.end()) return false;
+ if (elements.find(*constant) != elements.end()) {
+ std::ostringstream os;
+ os << "Duplicate constant found: " << Brief(*constant);
+ FATAL(os.str().c_str());
+ }
elements.insert(*constant);
}
- return true;
}
+#endif
STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
@@ -126,32 +132,30 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
handle(reserved_smi.first, isolate));
}
- Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), PretenureFlag::TENURED);
int array_index = 0;
for (const ConstantArraySlice* slice : idx_slice_) {
- if (array_index == fixed_array->length()) {
- break;
- }
DCHECK(array_index == 0 ||
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+#if DEBUG
// Different slices might contain the same element due to reservations, but
// all elements within a slice should be unique. If this DCHECK fails, then
// the AST nodes are not being internalized within a CanonicalHandleScope.
- DCHECK(slice->AllElementsAreUnique());
+ slice->CheckAllElementsAreUnique();
+#endif
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
}
- // Insert holes where reservations led to unused slots.
- size_t padding =
- std::min(static_cast<size_t>(fixed_array->length() - array_index),
- slice->capacity() - slice->size());
- for (size_t i = 0; i < padding; i++) {
- fixed_array->set(array_index++, *the_hole_value());
+ // Leave holes where reservations led to unused slots.
+ size_t padding = slice->capacity() - slice->size();
+ if (static_cast<size_t>(fixed_array->length() - array_index) <= padding) {
+ break;
}
+ array_index += padding;
}
- DCHECK_EQ(array_index, fixed_array->length());
+ DCHECK_GE(array_index, fixed_array->length());
return fixed_array;
}
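The ToFixedArray() change above swaps an explicitly padded array for one that is born holey: NewFixedArrayWithHoles pre-fills every slot with the hole value, so unused reservation slots are skipped rather than written, and trailing padding needs no work at all. A rough sketch of the resulting copy loop, using std::optional as a stand-in for a FixedArray with holes; the names here are illustrative, not V8's API.

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

using Slot = std::optional<int>;  // std::nullopt stands in for "the hole"

struct Slice {
  std::vector<int> constants;  // live entries
  size_t capacity;             // includes reserved-but-unused slots
};

std::vector<Slot> ToArray(const std::vector<Slice>& slices, size_t total) {
  std::vector<Slot> array(total);  // every slot starts out as a hole
  size_t index = 0;
  for (const Slice& slice : slices) {
    for (int c : slice.constants) array[index++] = c;  // copy live constants
    size_t padding = slice.capacity - slice.constants.size();
    if (total - index <= padding) break;  // trailing padding: nothing to skip
    index += padding;                     // leave the pre-existing holes
  }
  return array;
}

int main() {
  std::vector<Slice> slices = {{{1, 2}, 4}, {{3}, 4}};
  for (const Slot& s : ToArray(slices, 5)) {
    if (s) std::printf("%d ", *s); else std::printf("_ ");
  }
  std::printf("\n");  // prints: 1 2 _ _ 3
  return 0;
}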
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 8e95913e57..c99c8e7c59 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -82,7 +82,10 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
size_t Allocate(Handle<Object> object);
Handle<Object> At(size_t index) const;
void InsertAt(size_t index, Handle<Object> object);
- bool AllElementsAreUnique() const;
+
+#if DEBUG
+ void CheckAllElementsAreUnique() const;
+#endif
inline size_t available() const { return capacity() - reserved() - size(); }
inline size_t reserved() const { return reserved_; }
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 0e71b96cce..41d1ad82d9 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/interpreter/control-flow-builders.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -55,8 +56,10 @@ void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
// and misplaced between the headers.
DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
- for (auto& label : *additional_labels) {
- builder()->Bind(&label);
+ if (additional_labels != nullptr) {
+ for (auto& label : *additional_labels) {
+ builder()->Bind(&label);
+ }
}
}
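With the nullptr guard in place, LoopHeader() can take a defaulted pointer argument (see the header change below), so the common no-extra-labels call sites drop the argument entirely while a single implementation serves both shapes. A small sketch of the pattern, with a hypothetical Label type in place of BytecodeLabel:

#include <iostream>
#include <vector>

struct Label { const char* name; };

// One implementation serves both "no extra labels" and "bind these too"
// call sites via a defaulted null pointer.
void LoopHeader(std::vector<Label>* additional_labels = nullptr) {
  std::cout << "bind loop_header\n";
  if (additional_labels != nullptr) {
    for (Label& label : *additional_labels) {
      std::cout << "bind " << label.name << "\n";
    }
  }
}

int main() {
  LoopHeader();  // common case: plain loops pass nothing
  std::vector<Label> extra = {{"continue_target"}};
  LoopHeader(&extra);  // e.g. for-in loops that bind additional headers
  return 0;
}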
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 3174db5da1..68c28c70d1 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class ControlFlowBuilder BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ControlFlowBuilder BASE_EMBEDDED {
public:
explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
: builder_(builder) {}
@@ -29,7 +29,8 @@ class ControlFlowBuilder BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
};
-class BreakableControlFlowBuilder : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
+ : public ControlFlowBuilder {
public:
explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
: ControlFlowBuilder(builder), break_labels_(builder->zone()) {}
@@ -63,7 +64,8 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
// Class to track control flow for block statements (which can break in JS).
-class BlockBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE BlockBuilder final
+ : public BreakableControlFlowBuilder {
public:
explicit BlockBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder) {}
@@ -77,7 +79,7 @@ class BlockBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating break and continue statements with
// their loop.
-class LoopBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
@@ -85,7 +87,7 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
header_labels_(builder->zone()) {}
~LoopBuilder();
- void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
+ void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels = nullptr);
void JumpToHeader(int loop_depth);
void BindContinueTarget();
void EndLoop();
@@ -109,7 +111,8 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating break statements with their switch.
-class SwitchBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE SwitchBuilder final
+ : public BreakableControlFlowBuilder {
public:
explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
: BreakableControlFlowBuilder(builder),
@@ -139,7 +142,7 @@ class SwitchBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating control flow in try-catch statements.
-class TryCatchBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
public:
explicit TryCatchBuilder(BytecodeArrayBuilder* builder,
HandlerTable::CatchPrediction catch_prediction)
@@ -160,7 +163,7 @@ class TryCatchBuilder final : public ControlFlowBuilder {
// A class to help with co-ordinating control flow in try-finally statements.
-class TryFinallyBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
public:
explicit TryFinallyBuilder(BytecodeArrayBuilder* builder,
HandlerTable::CatchPrediction catch_prediction)
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 25147ca26b..50061949dc 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -19,7 +19,7 @@ class Isolate;
namespace interpreter {
// A helper class for constructing exception handler tables for the interpreter.
-class HandlerTableBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
public:
explicit HandlerTableBuilder(Zone* zone);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index c8ce5539e9..1ccd342f06 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -20,15 +20,13 @@ namespace v8 {
namespace internal {
namespace interpreter {
+using compiler::CodeAssemblerState;
using compiler::Node;
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
OperandScale operand_scale)
- : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
- Code::ComputeFlags(Code::BYTECODE_HANDLER),
- Bytecodes::ToString(bytecode),
- Bytecodes::ReturnCount(bytecode)),
+ : CodeStubAssembler(state),
bytecode_(bytecode),
operand_scale_(operand_scale),
bytecode_offset_(this, MachineType::PointerRepresentation()),
@@ -44,6 +42,8 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
+ RegisterCallGenerationCallbacks([this] { CallPrologue(); },
+ [this] { CallEpilogue(); });
}
InterpreterAssembler::~InterpreterAssembler() {
@@ -51,6 +51,7 @@ InterpreterAssembler::~InterpreterAssembler() {
// accumulator in the way described in the bytecode definitions in
// bytecodes.h.
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+ UnregisterCallGenerationCallbacks();
}
Node* InterpreterAssembler::GetInterpretedFramePointer() {
@@ -222,14 +223,8 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
Node* operand_offset = OperandOffset(operand_index);
- Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset));
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
- }
- return load;
+ return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
@@ -305,19 +300,12 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
int operand_offset =
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
- Node* load;
if (TargetSupportsUnalignedAccess()) {
- load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
}
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
- }
- return load;
}
Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
@@ -340,19 +328,12 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
bytecode_, operand_index, operand_scale()));
int operand_offset =
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
- Node* load;
if (TargetSupportsUnalignedAccess()) {
- load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+ return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
- }
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
}
- return load;
}
Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
@@ -422,12 +403,25 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
return BytecodeSignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
+ return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
+}
+
+Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
+ return SmiFromWord32(BytecodeOperandImm(operand_index));
+}
+
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
DCHECK(OperandType::kIdx ==
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
- return BytecodeUnsignedOperand(operand_index, operand_size);
+ return ChangeUint32ToWord(
+ BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
+ return SmiTag(BytecodeOperandIdx(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
@@ -435,7 +429,8 @@ Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
- return BytecodeSignedOperand(operand_index, operand_size);
+ return ChangeInt32ToIntPtr(
+ BytecodeSignedOperand(operand_index, operand_size));
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -459,30 +454,11 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- WordShl(index, kPointerSizeLog2));
- return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+ return LoadFixedArrayElement(constant_pool, index);
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
- Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kConstantPoolOffset);
- int offset = FixedArray::kHeaderSize - kHeapObjectTag;
-#if V8_TARGET_LITTLE_ENDIAN
- if (Is64()) {
- offset += kPointerSize / 2;
- }
-#endif
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
- if (Is64()) {
- return ChangeInt32ToInt64(
- Load(MachineType::Int32(), constant_pool, entry_offset));
- } else {
- return SmiUntag(
- Load(MachineType::AnyTagged(), constant_pool, entry_offset));
- }
+ return SmiUntag(LoadConstantPoolEntry(index));
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
@@ -519,7 +495,7 @@ Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
Node* call_count =
LoadFixedArrayElement(type_feedback_vector, call_count_slot);
- Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+ Node* new_count = SmiAdd(call_count, SmiConstant(1));
// Count is Smi, so we don't need a write barrier.
return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
new_count, SKIP_WRITE_BARRIER);
@@ -588,14 +564,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
GotoIf(is_megamorphic, &call);
Comment("check if it is an allocation site");
- Node* is_allocation_site = WordEqual(
- LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
+ GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)),
+ &check_initialized);
// If it is not the Array() function, mark megamorphic.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, function);
GotoUnless(is_array_function, &mark_megamorphic);
@@ -629,13 +603,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
// Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoUnless(is_js_function, &mark_megamorphic);
// Check if it is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, function);
GotoIf(is_array_function, &create_allocation_site);
@@ -704,6 +677,7 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsAndCall(
isolate(), tail_call_mode, CallableType::kAny);
Node* code_target = HeapConstant(callable.code());
+
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
}
@@ -719,7 +693,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// Slot id of 0 is used to indicate no type feedback is available.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+ Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
GotoIf(is_feedback_unavailable, &call_construct);
// Check that the constructor is not a smi.
@@ -729,7 +703,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// Check that constructor is a JSFunction.
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoUnless(is_js_function, &call_construct);
// Check if it is a monomorphic constructor.
@@ -784,9 +758,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
GotoUnless(is_allocation_site, &check_initialized);
// Make sure the function is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, constructor);
GotoUnless(is_array_function, &mark_megamorphic);
@@ -809,9 +782,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Comment("initialize the feedback element");
// Create an allocation site if the function is an array function,
// otherwise create a weak cell.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, constructor);
Branch(is_array_function, &create_allocation_site, &create_weak_cell);
@@ -872,13 +844,14 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
ExternalReference::runtime_function_table_address(isolate()));
Node* function_offset =
Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
- Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function =
+ IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
Node* function_entry =
Load(MachineType::Pointer(), function,
IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStub(callable.descriptor(), code_target, context, arg_count,
- first_arg, function_entry, result_size);
+ return CallStubR(callable.descriptor(), result_size, code_target, context,
+ arg_count, first_arg, function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
@@ -933,7 +906,7 @@ Node* InterpreterAssembler::Advance(Node* delta) {
Node* InterpreterAssembler::Jump(Node* delta) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(delta);
+ UpdateInterruptBudget(TruncateWordToWord32(delta));
Node* new_bytecode_offset = Advance(delta);
Node* target_bytecode = LoadBytecode(new_bytecode_offset);
return DispatchToBytecode(target_bytecode, new_bytecode_offset);
@@ -961,10 +934,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
Node* bytecode =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
- if (kPointerSize == 8) {
- bytecode = ChangeUint32ToUint64(bytecode);
- }
- return bytecode;
+ return ChangeUint32ToWord(bytecode);
}
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
@@ -1007,6 +977,7 @@ void InterpreterAssembler::InlineStar() {
}
Node* InterpreterAssembler::Dispatch() {
+ Comment("========= Dispatch");
Node* target_offset = Advance();
Node* target_bytecode = LoadBytecode(target_offset);
@@ -1031,17 +1002,19 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
Node* bytecode_offset) {
+ // TODO(ishell): Add CSA::CodeEntryPoint(code).
Node* handler_entry =
- IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ IntPtrAdd(BitcastTaggedToWord(handler),
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
}
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset) {
InterpreterDispatchDescriptor descriptor(isolate());
- Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
- BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
- return TailCallBytecodeDispatch(descriptor, handler_entry, args);
+ return TailCallBytecodeDispatch(
+ descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
+ BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1087,7 +1060,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Variable* loop_vars[] = {&var_value, var_type_feedback};
Label loop(this, 2, loop_vars), done_loop(this, &var_result);
var_value.Bind(value);
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&loop);
Bind(&loop);
{
@@ -1103,8 +1076,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert the Smi {value}.
var_result.Bind(SmiToWord32(value));
var_type_feedback->Bind(
- Word32Or(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ SmiOr(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kSignedSmall)));
Goto(&done_loop);
}
@@ -1114,16 +1087,16 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
- Branch(WordEqual(value_map, HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
+ Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
+ &if_valueisnotheapnumber);
Bind(&if_valueisheapnumber);
{
// Truncate the floating point value.
var_result.Bind(TruncateHeapNumberValueToWord32(value));
var_type_feedback->Bind(
- Word32Or(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kNumber)));
+ SmiOr(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNumber)));
Goto(&done_loop);
}
@@ -1132,9 +1105,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this,
- Word32Equal(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kNone)));
+ CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
Label if_valueisoddball(this),
if_valueisnotoddball(this, Label::kDeferred);
@@ -1147,7 +1119,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert Oddball to a Number and perform checks again.
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
var_type_feedback->Bind(
- Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
Goto(&loop);
}
@@ -1156,7 +1128,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert the {value} to a Number first.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
var_value.Bind(CallStub(callable, context, value));
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
Goto(&loop);
}
}
@@ -1174,7 +1146,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// function.
Node* profiling_weight =
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
- BytecodeOffset());
+ TruncateWordToWord32(BytecodeOffset()));
UpdateInterruptBudget(profiling_weight);
}
@@ -1187,9 +1159,9 @@ Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
}
Node* InterpreterAssembler::LoadOSRNestingLevel() {
- Node* offset =
- IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
- return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
+ return LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kOSRNestingLevelOffset,
+ MachineType::Int8());
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -1261,19 +1233,21 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
Node* InterpreterAssembler::RegisterCount() {
Node* bytecode_array = LoadRegister(Register::bytecode_array());
Node* frame_size = LoadObjectField(
- bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
- return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
+ return WordShr(ChangeUint32ToWord(frame_size),
+ IntPtrConstant(kPointerSizeLog2));
}
Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+ Node* register_count = RegisterCount();
if (FLAG_debug_code) {
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ AbortIfWordNotEqual(array_size, register_count,
+ kInvalidRegisterFileInGenerator);
}
- Variable var_index(this, MachineRepresentation::kWord32);
- var_index.Bind(Int32Constant(0));
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
// Iterate over register file and write values into array.
// The mapping of register to array index must match that used in
@@ -1283,16 +1257,14 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- Node* condition = Int32LessThan(index, RegisterCount());
- GotoUnless(condition, &done_loop);
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
- Node* reg_index =
- Int32Sub(Int32Constant(Register(0).ToOperand()), index);
- Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ Node* value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
- var_index.Bind(Int32Add(index, Int32Constant(1)));
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
Bind(&done_loop);
@@ -1301,14 +1273,15 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
}
Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+ Node* register_count = RegisterCount();
if (FLAG_debug_code) {
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ AbortIfWordNotEqual(array_size, register_count,
+ kInvalidRegisterFileInGenerator);
}
- Variable var_index(this, MachineRepresentation::kWord32);
- var_index.Bind(Int32Constant(0));
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
// Iterate over array and write values into register file. Also erase the
// array contents to not keep them alive artificially.
@@ -1317,18 +1290,16 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- Node* condition = Int32LessThan(index, RegisterCount());
- GotoUnless(condition, &done_loop);
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
Node* value = LoadFixedArrayElement(array, index);
- Node* reg_index =
- Int32Sub(Int32Constant(Register(0).ToOperand()), index);
- StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ StoreRegister(value, reg_index);
StoreFixedArrayElement(array, index, StaleRegisterConstant());
- var_index.Bind(Int32Add(index, Int32Constant(1)));
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
Bind(&done_loop);
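A recurring theme in the interpreter-assembler.cc changes above: the ad-hoc "if (kPointerSize == 8) ChangeInt32ToInt64(...)" widening is removed from the raw loads and replaced by explicit conversions at the typed accessors, ChangeInt32ToIntPtr for signed operands (registers, immediates) and ChangeUint32ToWord for unsigned ones (indexes, bytecodes). The distinction matters because the two extensions disagree on any value with the top bit set. A plain C++ sketch of the rule, with the function names borrowed from the diff but implemented here as simple casts:

#include <cstdint>
#include <cstdio>

// Sign-extend signed 32-bit operands to pointer width...
intptr_t ChangeInt32ToIntPtr(int32_t value) {
  return static_cast<intptr_t>(value);  // sign-extends on 64-bit targets
}
// ...but zero-extend unsigned ones.
uintptr_t ChangeUint32ToWord(uint32_t value) {
  return static_cast<uintptr_t>(value);  // zero-extends on 64-bit targets
}

int main() {
  int32_t reg_operand = -3;            // register operands may be negative
  uint32_t idx_operand = 0xFFFFFFF0u;  // index operands are unsigned
  std::printf("reg -> %#zx\n",
              static_cast<size_t>(ChangeInt32ToIntPtr(reg_operand)));
  std::printf("idx -> %#zx\n",
              static_cast<size_t>(ChangeUint32ToWord(idx_operand)));
  // On a 64-bit target: reg -> 0xfffffffffffffffd, idx -> 0xfffffff0.
  return 0;
}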
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index aefd2bc053..5183f3efed 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -20,32 +20,41 @@ namespace interpreter {
class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
public:
- InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+ InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
OperandScale operand_scale);
- virtual ~InterpreterAssembler();
+ ~InterpreterAssembler();
- // Returns the count immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit unsigned count immediate for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandCount(int operand_index);
- // Returns the 8-bit flag for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandFlag(int operand_index);
- // Returns the index immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit zero-extended index immediate for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
- // Returns the UImm8 immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the smi index immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ compiler::Node* BytecodeOperandIdxSmi(int operand_index);
+ // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandUImm(int operand_index);
- // Returns the Imm8 immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit signed immediate for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
- // Returns the register index for bytecode operand |operand_index| in the
+ // Returns the word-size signed immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
+ // Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
+ compiler::Node* BytecodeOperandImmSmi(int operand_index);
+ // Returns the word-size sign-extended register index for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
- // Returns the runtime id immediate for bytecode operand
+ // Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
- // Returns the intrinsic id immediate for bytecode operand
+ // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
@@ -209,8 +218,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
- void CallPrologue() override;
- void CallEpilogue() override;
+ void CallPrologue();
+ void CallEpilogue();
// Increment the dispatch counter for the (current, next) bytecode pair.
void TraceBytecodeDispatch(compiler::Node* target_index);
@@ -218,8 +227,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
- // Updates the bytecode array's interrupt budget by |weight| and calls
- // Runtime::kInterrupt if counter reaches zero.
+ // Updates the bytecode array's interrupt budget by a 32-bit signed |weight|
+ // and calls Runtime::kInterrupt if counter reaches zero.
void UpdateInterruptBudget(compiler::Node* weight);
// Returns the offset of register |index| relative to RegisterFilePointer().
@@ -236,6 +245,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
MachineType result_type);
+ // Returns the zero- or sign-extended word32 value of the operand.
compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
compiler::Node* BytecodeOperandSignedByte(int operand_index);
compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
@@ -243,6 +253,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+ // Returns the zero- or sign-extended word32 value of an operand of the
+ // given size.
compiler::Node* BytecodeSignedOperand(int operand_index,
OperandSize operand_size);
compiler::Node* BytecodeUnsignedOperand(int operand_index,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index b46ca878cc..a2820fb128 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -105,12 +105,8 @@ Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
InstanceTypeCompareMode mode) {
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
Node* instance_type = __ LoadInstanceType(object);
- InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
- end(assembler_);
if (mode == kInstanceTypeEqual) {
return __ Word32Equal(instance_type, __ Int32Constant(type));
} else {
@@ -122,6 +118,7 @@ Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
+ // TODO(ishell): Use Select here.
InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
return_false(assembler_), end(assembler_);
Node* arg = __ LoadRegister(input);
@@ -148,6 +145,8 @@ Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
Node* context) {
+ // TODO(ishell): Use Select here.
+ // TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
@@ -185,16 +184,13 @@ Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) {
return IsInstanceType(input, JS_PROXY_TYPE);
}
-Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
- return IsInstanceType(input, JS_REGEXP_TYPE);
-}
-
Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
Node* context) {
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
+ // TODO(ishell): Use SelectBooleanConstant here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
@@ -222,14 +218,22 @@ Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
- Node** args = zone()->NewArray<Node*>(param_count + 1); // 1 for context
+ int input_count = param_count + 2; // +2 for target and context
+ Node** args = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ args[index++] = __ HeapConstant(callable.code());
for (int i = 0; i < param_count; i++) {
- args[i] = __ LoadRegister(args_reg);
+ args[index++] = __ LoadRegister(args_reg);
args_reg = __ NextRegister(args_reg);
}
- args[param_count] = context;
+ args[index++] = context;
+ return __ CallStubN(callable.descriptor(), 1, input_count, args);
+}
- return __ CallStubN(callable, args);
+Node* IntrinsicsHelper::CreateIterResultObject(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::CreateIterResultObject(isolate()));
}
Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
@@ -238,11 +242,6 @@ Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
CodeFactory::HasProperty(isolate()));
}
-Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::FastNewObject(isolate()));
-}
-
Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsStubCall(input, context,
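The IntrinsicAsStubCall() change above reflects a new convention for the N-ary stub call: the inputs array is now [code target, arg0..argN-1, context] with the target at index 0, rather than [arg0..argN-1, context] plus a separately passed target, hence input_count = param_count + 2. A toy sketch of packing arguments under that convention; Node here is a stand-in type, not V8's compiler::Node.

#include <cstdio>
#include <vector>

using Node = const char*;  // stand-in for compiler::Node*

// The callee sees one flat array: inputs[0] is the code target, the last
// input is the context, and everything between is a parameter.
void CallStubN(int input_count, Node* inputs) {
  std::printf("target:  %s\n", inputs[0]);
  for (int i = 1; i < input_count - 1; ++i) {
    std::printf("param:   %s\n", inputs[i]);
  }
  std::printf("context: %s\n", inputs[input_count - 1]);
}

int main() {
  int param_count = 2;
  int input_count = param_count + 2;  // +2 for target and context
  std::vector<Node> args(input_count);
  int index = 0;
  args[index++] = "code_target";
  args[index++] = "reg_a";
  args[index++] = "reg_b";
  args[index++] = "context";
  CallStubN(input_count, args.data());
  return 0;
}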
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 70ff291df3..825e2b9a98 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -23,25 +23,24 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting argument count is variable).
-#define INTRINSICS_LIST(V) \
- V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsRegExp, is_regexp, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(NewObject, new_object, 2) \
- V(NumberToString, number_to_string, 1) \
- V(RegExpExec, reg_exp_exec, 4) \
- V(SubString, sub_string, 3) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
- V(ToObject, to_object, 1) \
+#define INTRINSICS_LIST(V) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(NumberToString, number_to_string, 1) \
+ V(RegExpExec, reg_exp_exec, 4) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
+ V(ToObject, to_object, 1) \
V(ValueOf, value_of, 1)
class IntrinsicsHelper {
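INTRINSICS_LIST is an X-macro: each V(UpperName, lower_name, arg_count) entry is expanded several times to generate the intrinsic id enum, name strings, and dispatch cases from one source of truth, which is why adding CreateIterResultObject and dropping IsRegExp and NewObject each touch only one line of the list. A compilable miniature of the technique, with illustrative entries rather than the real list:

#include <cstdio>

// Each V(UpperName, lower_name, arg_count) entry is expanded once per use of
// the list.
#define MY_INTRINSICS_LIST(V) \
  V(IsSmi, is_smi, 1)         \
  V(ToNumber, to_number, 1)

enum class IntrinsicId {
#define DECLARE_ID(Upper, lower, count) k##Upper,
  MY_INTRINSICS_LIST(DECLARE_ID)
#undef DECLARE_ID
};

const char* NameOf(IntrinsicId id) {
  switch (id) {
#define NAME_CASE(Upper, lower, count) \
  case IntrinsicId::k##Upper:          \
    return #lower;
    MY_INTRINSICS_LIST(NAME_CASE)
#undef NAME_CASE
  }
  return "<unknown>";
}

int main() {
  std::printf("%s\n", NameOf(IntrinsicId::kToNumber));  // prints: to_number
  return 0;
}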
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 81aecafecf..60c5e595af 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/ast/prettyprinter.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -27,7 +28,6 @@ namespace interpreter {
using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
-typedef InterpreterAssembler::Arg Arg;
#define __ assembler->
@@ -41,9 +41,41 @@ class InterpreterCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
+ class TimerScope final {
+ public:
+ TimerScope(RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id)
+ : stats_(stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ }
+ }
+
+ explicit TimerScope(RuntimeCallCounter* counter) : stats_(nullptr) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ timer_.Start(counter, nullptr);
+ }
+ }
+
+ ~TimerScope() {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ if (stats_) {
+ RuntimeCallStats::Leave(stats_, &timer_);
+ } else {
+ timer_.Stop();
+ }
+ }
+ }
+
+ private:
+ RuntimeCallStats* stats_;
+ RuntimeCallTimer timer_;
+ };
+
BytecodeGenerator* generator() { return &generator_; }
BytecodeGenerator generator_;
+ RuntimeCallStats* runtime_call_stats_;
+ RuntimeCallCounter background_execute_counter_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
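TimerScope is the usual RAII timing idiom: the constructor starts a timer (or enters a RuntimeCallStats scope), and the destructor stops and records it, so every exit path out of the guarded scope is measured without explicit stop calls. That is what makes it safe to choose between the background counter and the main-thread stats once, at construction time. A self-contained sketch of the idiom with an illustrative std::chrono timer:

#include <chrono>
#include <cstdio>

// Constructor starts the clock; destructor stops and reports it, so every
// return path out of the enclosing scope is measured.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* label)
      : label_(label), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start_);
    std::printf("%s: %lld us\n", label_,
                static_cast<long long>(elapsed.count()));
  }

 private:
  const char* label_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  ScopedTimer timer("compile");  // reported when main returns
  volatile long sink = 0;
  for (long i = 0; i < 1000000; ++i) sink += i;
  return 0;
}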
@@ -73,24 +105,9 @@ void Interpreter::Initialize() {
};
for (OperandScale operand_scale : kOperandScales) {
-#define GENERATE_CODE(Name, ...) \
- { \
- if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) { \
- InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name, \
- operand_scale); \
- Do##Name(&assembler); \
- Handle<Code> code = assembler.GenerateCode(); \
- size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
- dispatch_table_[index] = code->entry(); \
- TraceCodegen(code); \
- PROFILE( \
- isolate_, \
- CodeCreateEvent( \
- CodeEventListener::BYTECODE_HANDLER_TAG, \
- AbstractCode::cast(*code), \
- Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
- } \
- }
+#define GENERATE_CODE(Name, ...) \
+ InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \
+ &Interpreter::Do##Name);
BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
}
@@ -108,6 +125,27 @@ void Interpreter::Initialize() {
DCHECK(IsDispatchTableInitialized());
}
+void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+ OperandScale operand_scale,
+ BytecodeGeneratorFunc generator) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ InterpreterDispatchDescriptor descriptor(isolate_);
+ compiler::CodeAssemblerState state(
+ isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
+ InterpreterAssembler assembler(&state, bytecode, operand_scale);
+ (this->*generator)(&assembler);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+ dispatch_table_[index] = code->entry();
+ TraceCodegen(code);
+ PROFILE(isolate_, CodeCreateEvent(
+ CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(*code),
+ Bytecodes::ToString(bytecode, operand_scale).c_str()));
+}
+
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
@@ -154,10 +192,14 @@ int Interpreter::InterruptBudget() {
}
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
- : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}
+ : CompilationJob(info->isolate(), info, "Ignition"),
+ generator_(info),
+ runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
+ background_execute_counter_("CompileBackgroundIgnition") {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
- if (FLAG_print_bytecode || FLAG_print_ast) {
+ CodeGenerator::MakeCodePrologue(info(), "interpreter");
+ if (FLAG_print_bytecode) {
OFStream os(stdout);
std::unique_ptr<char[]> name = info()->GetDebugName();
os << "[generating bytecode for function: " << info()->GetDebugName().get()
@@ -165,25 +207,15 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
<< std::flush;
}
-#ifdef DEBUG
- if (info()->parse_info() && FLAG_print_ast) {
- OFStream os(stdout);
- os << "--- AST ---" << std::endl
- << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
- << std::endl
- << std::flush;
- }
-#endif // DEBUG
-
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
- // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
- // timers.
- RuntimeCallTimerScope runtimeTimer(info()->isolate(),
- &RuntimeCallStats::CompileIgnition);
- TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
+ TimerScope runtimeTimer =
+ executed_on_background_thread()
+ ? TimerScope(&background_execute_counter_)
+ : TimerScope(runtime_call_stats_, &RuntimeCallStats::CompileIgnition);
+ // TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
generator()->GenerateBytecode(stack_limit());
@@ -195,13 +227,20 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
+ // Add background runtime call stats.
+ if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
+ runtime_call_stats_->CompileBackgroundIgnition.Add(
+ &background_execute_counter_);
+ }
+
+ RuntimeCallTimerScope runtimeTimer(
+ runtime_call_stats_, &RuntimeCallStats::CompileIgnitionFinalization);
+
Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
if (generator()->HasStackOverflow()) {
return FAILED;
}
- CodeGenerator::MakeCodePrologue(info(), "interpreter");
-
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -326,8 +365,7 @@ void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* smi_int = __ SmiTag(raw_int);
+ Node* smi_int = __ BytecodeOperandImmSmi(0);
__ SetAccumulator(smi_int);
__ Dispatch();
}
@@ -419,21 +457,19 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
__ Dispatch();
}
-Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
+Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context, Node* name_index,
Node* feedback_slot,
InterpreterAssembler* assembler) {
- typedef LoadGlobalWithVectorDescriptor Descriptor;
-
// Load the global via the LoadGlobalIC.
Node* code_target = __ HeapConstant(ic.code());
+ Node* name = __ LoadConstantPoolEntry(name_index);
Node* smi_slot = __ SmiTag(feedback_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- return __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ return __ CallStub(ic.descriptor(), code_target, context, name, smi_slot,
+ type_feedback_vector);
}
-// LdaGlobal <slot>
+// LdaGlobal <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
@@ -443,13 +479,14 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdaGlobalInsideTypeof <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
@@ -459,14 +496,14 @@ void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
Node* native_context = __ LoadNativeContext(context);
@@ -481,10 +518,8 @@ void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -650,7 +685,8 @@ void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
? INSIDE_TYPEOF
: NOT_INSIDE_TYPEOF);
- Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
+ Node* result =
+ BuildLoadGlobal(ic, context, name_index, feedback_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -717,7 +753,6 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
- typedef LoadWithVectorDescriptor Descriptor;
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
@@ -728,10 +763,8 @@ void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(
- ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
- Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -741,7 +774,6 @@ void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
- typedef LoadWithVectorDescriptor Descriptor;
Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
@@ -751,16 +783,13 @@ void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(
- ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
- Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -771,10 +800,8 @@ void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -799,7 +826,6 @@ void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
}
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -810,10 +836,8 @@ void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -835,13 +859,36 @@ void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
+// StaDataPropertyInLiteral <object> <name> <flags>
+//
+// Define a property <name> with the value from the accumulator in <object>.
+// Property attributes, and whether to set the function name, are stored in
+// DataPropertyInLiteralFlags <flags>.
+//
+// This definition is not observable and is used only for definitions
+// in object or class literals.
+void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
+ Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
+ Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
+ Node* value = __ GetAccumulator();
+ Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
+ Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));
+
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* context = __ GetContext();
+
+ __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
+ value, flags, type_feedback_vector, vector_index);
+ __ Dispatch();
+}
+
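
As an aside on the new StaDataPropertyInLiteral operands: <flags> is a packed
bitfield decoded via BytecodeOperandFlag. The sketch below models the
BitField encode/decode pattern V8 uses for such operands; the field names and
layout here are hypothetical, chosen only to mirror the comment above
(property attributes plus a set-function-name bit).

    #include <cassert>
    #include <cstdint>

    // Illustrative BitField-style flags encoding, modeled on the pattern V8
    // uses for operands such as DataPropertyInLiteralFlags. The field layout
    // below is hypothetical.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
    };

    using SetFunctionNameBit = BitField<bool, 0, 1>;  // hypothetical layout
    using AttributesBits = BitField<uint8_t, 1, 3>;   // hypothetical layout

    int main() {
      uint32_t flags =
          SetFunctionNameBit::encode(true) | AttributesBits::encode(4);
      assert(SetFunctionNameBit::decode(flags));
      assert(AttributesBits::decode(flags) == 4);
      return 0;
    }
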
// LdaModuleVariable <cell_index> <depth>
//
// Load the contents of a module variable into the accumulator. The variable is
// identified by <cell_index>. <depth> is the depth of the current context
// relative to the module context.
void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
- Node* cell_index = __ BytecodeOperandImm(0);
+ Node* cell_index = __ BytecodeOperandImmIntPtr(0);
Node* depth = __ BytecodeOperandUImm(1);
Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
@@ -884,7 +931,7 @@ void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
// <depth> is the depth of the current context relative to the module context.
void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* cell_index = __ BytecodeOperandImm(0);
+ Node* cell_index = __ BytecodeOperandImmIntPtr(0);
Node* depth = __ BytecodeOperandUImm(1);
Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
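
Both module-variable handlers resolve the module context by walking <depth>
links up the context chain before touching the cell. A minimal model of
GetContextAtDepth, with a hypothetical Context struct standing in for V8's
heap-allocated contexts:

    #include <cassert>

    // Hypothetical model of a context chain; V8's real Context is a heap
    // object, not a plain struct.
    struct Context {
      Context* previous;
    };

    Context* GetContextAtDepth(Context* context, unsigned depth) {
      // Follow the previous-context link once per level of depth.
      while (depth-- > 0) context = context->previous;
      return context;
    }

    int main() {
      Context module_ctx{nullptr};
      Context block_ctx{&module_ctx};
      Context inner_ctx{&block_ctx};
      assert(GetContextAtDepth(&inner_ctx, 2) == &module_ctx);
      return 0;
    }
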
@@ -989,62 +1036,147 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
// sometimes emit comparisons that shouldn't collect feedback (e.g.
// try-finally blocks and generators), and we could get rid of this by
// introducing Smi equality tests.
- Label skip_feedback_update(assembler);
- __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
- &skip_feedback_update);
-
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
- gather_rhs_type(assembler), do_compare(assembler);
- __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ Label gather_type_feedback(assembler), do_compare(assembler);
+ __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
+ &gather_type_feedback);
- __ Bind(&lhs_is_smi);
- var_type_feedback.Bind(
- __ Int32Constant(CompareOperationFeedback::kSignedSmall));
- __ Goto(&gather_rhs_type);
-
- __ Bind(&lhs_is_not_smi);
+ __ Bind(&gather_type_feedback);
{
- Label lhs_is_number(assembler), lhs_is_not_number(assembler);
- Node* lhs_map = __ LoadMap(lhs);
- __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
- &lhs_is_not_number);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
+ lhs_is_not_string(assembler), gather_rhs_type(assembler),
+ update_feedback(assembler);
+
+ __ GotoUnless(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
- __ Bind(&lhs_is_number);
- var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kSignedSmall));
__ Goto(&gather_rhs_type);
- __ Bind(&lhs_is_not_number);
- var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
- __ Goto(&do_compare);
- }
+ __ Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = __ LoadMap(lhs);
+ __ GotoUnless(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
+
+ var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_number);
+ {
+ Node* lhs_instance_type = __ LoadInstanceType(lhs);
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ Label lhs_is_not_oddball(assembler);
+ __ GotoUnless(
+ __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
+ &lhs_is_not_oddball);
+
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_oddball);
+ }
+
+ Label lhs_is_not_string(assembler);
+ __ GotoUnless(__ IsStringInstanceType(lhs_instance_type),
+ &lhs_is_not_string);
+
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kString));
+ } else {
+ var_type_feedback.Bind(__ SelectSmiConstant(
+ __ Word32Equal(
+ __ Word32And(lhs_instance_type,
+ __ Int32Constant(kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag)),
+ CompareOperationFeedback::kInternalizedString,
+ CompareOperationFeedback::kString));
+ }
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_string);
+ var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kAny));
+ __ Goto(&gather_rhs_type);
+ }
+ }
- __ Bind(&gather_rhs_type);
- {
- Label rhs_is_smi(assembler);
- __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
-
- Node* rhs_map = __ LoadMap(rhs);
- Node* rhs_type =
- __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
- __ Int32Constant(CompareOperationFeedback::kNumber),
- __ Int32Constant(CompareOperationFeedback::kAny));
- var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
- __ Goto(&do_compare);
-
- __ Bind(&rhs_is_smi);
- var_type_feedback.Bind(
- __ Word32Or(var_type_feedback.value(),
- __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
- __ Goto(&do_compare);
+ __ Bind(&gather_rhs_type);
+ {
+ Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);
+
+ __ GotoUnless(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
+
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = __ LoadMap(rhs);
+ __ GotoUnless(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
+
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kNumber)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_number);
+ {
+ Node* rhs_instance_type = __ LoadInstanceType(rhs);
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ Label rhs_is_not_oddball(assembler);
+ __ GotoUnless(__ Word32Equal(rhs_instance_type,
+ __ Int32Constant(ODDBALL_TYPE)),
+ &rhs_is_not_oddball);
+
+ var_type_feedback.Bind(__ SmiOr(
+ var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_oddball);
+ }
+
+ Label rhs_is_not_string(assembler);
+ __ GotoUnless(__ IsStringInstanceType(rhs_instance_type),
+ &rhs_is_not_string);
+
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kString)));
+ } else {
+ var_type_feedback.Bind(__ SmiOr(
+ var_type_feedback.value(),
+ __ SelectSmiConstant(
+ __ Word32Equal(
+ __ Word32And(rhs_instance_type,
+ __ Int32Constant(kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag)),
+ CompareOperationFeedback::kInternalizedString,
+ CompareOperationFeedback::kString)));
+ }
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_string);
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kAny));
+ __ Goto(&update_feedback);
+ }
+ }
+ }
+
+ __ Bind(&update_feedback);
+ {
+ __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+ __ Goto(&do_compare);
+ }
}
__ Bind(&do_compare);
- __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_index);
- __ Goto(&skip_feedback_update);
-
- __ Bind(&skip_feedback_update);
Node* result;
switch (compare_op) {
case Token::EQ:
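
The rewritten feedback collection above folds each observation into
var_type_feedback with SmiOr, so the recorded feedback behaves like a join in
a small lattice: bits only get added, and kAny subsumes everything. A
standalone sketch of that invariant, with hypothetical bit values (the real
constants are defined by CompareOperationFeedback):

    #include <cassert>
    #include <cstdint>

    // Hypothetical bit values; the real ones are defined by
    // CompareOperationFeedback in V8.
    enum Feedback : uint32_t {
      kNone = 0,
      kSignedSmall = 1 << 0,
      kNumber = kSignedSmall | 1 << 1,  // kNumber subsumes kSignedSmall
      kAny = 0xFF,
    };

    int main() {
      uint32_t feedback = kNone;
      feedback |= kSignedSmall;  // lhs was a Smi
      feedback |= kNumber;       // rhs was a HeapNumber
      // Or-ing is monotone: feedback only widens, never narrows.
      assert((feedback & kSignedSmall) && (feedback & kNumber));
      assert((feedback | kAny) == kAny);
      return 0;
    }
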
@@ -1126,8 +1258,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
Node* slot_index = __ BytecodeOperandIdx(1);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
- var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned),
+ var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, lhs, &var_lhs_type_feedback);
Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
@@ -1166,10 +1299,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
UNREACHABLE();
}
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
if (FLAG_debug_code) {
Label ok(assembler);
@@ -1182,9 +1314,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
}
Node* input_feedback =
- __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
- __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
- type_feedback_vector, slot_index);
+ __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ __ UpdateFeedback(__ SmiOr(result_type, input_feedback), type_feedback_vector,
+ slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1251,8 +1383,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
@@ -1271,7 +1402,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
- __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
@@ -1283,8 +1414,9 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
AddWithFeedbackStub stub(__ isolate());
Callable callable =
Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
- Node* args[] = {left, right, slot_index, type_feedback_vector, context};
- var_result.Bind(__ CallStubN(callable, args, 1));
+ var_result.Bind(__ CallStub(callable, context, left, right,
+ __ TruncateWordToWord32(slot_index),
+ type_feedback_vector));
__ Goto(&end);
}
__ Bind(&end);
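
The AddSmi fast path adds the operands as raw tagged words and falls back to
AddWithFeedbackStub when the projection reports overflow. The same trick can
be sketched outside the assembler: on 64-bit V8 a Smi keeps its payload in
the upper 32 bits, so overflow of the tagged addition coincides exactly with
overflow of the untagged values. This sketch assumes GCC/Clang's
__builtin_add_overflow:

    #include <cassert>
    #include <cstdint>

    // Sketch of Smi addition with overflow check (GCC/Clang builtin). On
    // 64-bit V8 a Smi stores its value in the upper 32 bits; adding the
    // tagged words adds the values and overflows int64_t exactly when the
    // untagged 32-bit sum would overflow.
    bool SmiAdd(int64_t lhs_tagged, int64_t rhs_tagged, int64_t* result) {
      return !__builtin_add_overflow(lhs_tagged, rhs_tagged, result);
    }

    int main() {
      auto tag = [](int32_t v) { return static_cast<int64_t>(v) << 32; };
      int64_t result;
      assert(SmiAdd(tag(2), tag(3), &result) && result == tag(5));
      assert(!SmiAdd(tag(INT32_MAX), tag(1), &result));  // slow-path bailout
      return 0;
    }
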
@@ -1305,8 +1437,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
@@ -1325,7 +1456,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
- __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
@@ -1337,8 +1468,9 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
SubtractWithFeedbackStub stub(__ isolate());
Callable callable = Callable(
stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
- Node* args[] = {left, right, slot_index, type_feedback_vector, context};
- var_result.Bind(__ CallStubN(callable, args, 1));
+ var_result.Bind(__ CallStub(callable, context, left, right,
+ __ TruncateWordToWord32(slot_index),
+ type_feedback_vector));
__ Goto(&end);
}
__ Bind(&end);
@@ -1355,22 +1487,21 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1383,22 +1514,21 @@ void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1412,23 +1542,22 @@ void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1442,23 +1571,22 @@ void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1519,14 +1647,276 @@ void Interpreter::DoToObject(InterpreterAssembler* assembler) {
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
- DoUnaryOpWithFeedback<IncStub>(assembler);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // Shared entry for floating point increment.
+ Label do_finc(assembler), end(assembler);
+ Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ value_var.Bind(value);
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kNone));
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->IntPtrAddWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_finc_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_finc);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_finc);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->SmiEqual(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->SmiConstant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* finc_result = assembler->Float64Add(finc_value, one);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+
+ __ SetAccumulator(result_var.value());
+ __ Dispatch();
}
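
Structurally, the inlined Inc handler is a retry loop: try the Smi fast path,
then the HeapNumber path, and otherwise convert the value (an Oddball via
kToNumberOffset, anything else via NonNumberToNumber) and jump back to the
start. A toy model of that control flow, with illustrative types in place of
V8's tagged values:

    #include <cassert>
    #include <string>

    // Toy model of the Inc retry loop: try the numeric fast paths first,
    // otherwise convert to a number and start over. Types and conversions
    // are illustrative, not V8's.
    struct Value {
      enum Kind { kSmi, kDouble, kString } kind;
      double number = 0;
      std::string str;
    };

    double Increment(Value value) {
      for (;;) {
        switch (value.kind) {
          case Value::kSmi:
          case Value::kDouble:
            return value.number + 1;  // fast paths
          case Value::kString:
            // NonNumberToNumber equivalent: convert, then retry the loop.
            value = {Value::kDouble, std::stod(value.str), {}};
            break;
        }
      }
    }

    int main() {
      assert(Increment({Value::kSmi, 41, {}}) == 42);
      assert(Increment({Value::kString, 0, "41"}) == 42);
      return 0;
    }
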
// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
- DoUnaryOpWithFeedback<DecStub>(assembler);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // Shared entry for floating point decrement.
+ Label do_fdec(assembler), end(assembler);
+ Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kNone));
+ value_var.Bind(value);
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi subtraction first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->IntPtrSubWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fdec_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_fdec);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_fdec);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->SmiEqual(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->SmiConstant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdec);
+ {
+ Node* fdec_value = var_fdec_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+
+ __ SetAccumulator(result_var.value());
+ __ Dispatch();
}
// LogicalNot
@@ -1625,6 +2015,19 @@ void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}
+// GetSuperConstructor
+//
+// Get the super constructor from the object referenced by the accumulator.
+// The result is stored in register |reg|.
+void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
+ Node* active_function = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ GetSuperConstructor(active_function, context);
+ Node* reg = __ BytecodeOperandReg(0);
+ __ StoreRegister(result, reg);
+ __ Dispatch();
+}
+
void Interpreter::DoJSCall(InterpreterAssembler* assembler,
TailCallMode tail_call_mode) {
Node* function_reg = __ BytecodeOperandReg(0);
@@ -1756,6 +2159,26 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
__ Dispatch();
}
+// NewWithSpread <first_arg> <arg_count>
+//
+// Call the constructor in |first_arg| with new.target in |first_arg + 1| and
+// the |arg_count - 2| following arguments. The final argument is always a
+// spread.
+//
+void Interpreter::DoNewWithSpread(InterpreterAssembler* assembler) {
+ Node* first_arg_reg = __ BytecodeOperandReg(0);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(1);
+ Node* context = __ GetContext();
+
+ // Call into Runtime function NewWithSpread which does everything.
+ Node* runtime_function = __ Int32Constant(Runtime::kNewWithSpread);
+ Node* result =
+ __ CallRuntimeN(runtime_function, context, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
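
The NewWithSpread operands name a contiguous register window: the
constructor, then new.target, then the trailing arguments ending in the
spread. A sketch of how such a window decodes, with hypothetical register
numbering:

    #include <cassert>
    #include <vector>

    // Hypothetical decode of a NewWithSpread register window:
    // [constructor, new.target, arg0, ..., argN-1] with arg_count = N + 2.
    struct NewWithSpreadArgs {
      int constructor_reg;
      int new_target_reg;
      std::vector<int> arg_regs;
    };

    NewWithSpreadArgs Decode(int first_arg_reg, int arg_count) {
      NewWithSpreadArgs args{first_arg_reg, first_arg_reg + 1, {}};
      for (int i = 2; i < arg_count; ++i) {
        args.arg_regs.push_back(first_arg_reg + i);
      }
      return args;
    }

    int main() {
      NewWithSpreadArgs args = Decode(/*first_arg_reg=*/5, /*arg_count=*/4);
      assert(args.constructor_reg == 5 && args.new_target_reg == 6);
      assert(args.arg_regs.size() == 2);  // |arg_count - 2| trailing args
      return 0;
    }
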
// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
@@ -1763,7 +2186,6 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
// registers. The new.target is in the accumulator.
//
void Interpreter::DoNew(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
Node* new_target = __ GetAccumulator();
Node* constructor_reg = __ BytecodeOperandReg(0);
Node* constructor = __ LoadRegister(constructor_reg);
@@ -1846,11 +2268,90 @@ void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
DoCompareOp(Token::INSTANCEOF, assembler);
}
+// TestUndetectable <src>
+//
+// Test if the value in the <src> register equals null/undefined. This is
+// done by checking the undetectable bit on the map of the object.
+void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+
+ Label not_equal(assembler), end(assembler);
+ // If the object is a Smi then return false.
+ __ GotoIf(__ TaggedIsSmi(object), &not_equal);
+
+ // If it is a HeapObject, load the map and check the undetectable bit.
+ Node* map = __ LoadMap(object);
+ Node* map_bitfield = __ LoadMapBitField(map);
+ Node* map_undetectable =
+ __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
+ __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), &not_equal);
+
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+
+ __ Bind(&not_equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
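
The whole TestUndetectable predicate is one map load plus one bit test.
Expressed as plain C++ over a hypothetical map bitfield (the bit position is
illustrative; V8's real constant is Map::kIsUndetectable):

    #include <cassert>
    #include <cstdint>

    // Hypothetical bit position standing in for Map::kIsUndetectable.
    constexpr int kIsUndetectable = 4;

    bool IsUndetectable(uint8_t map_bitfield) {
      // TestUndetectable: true iff the map's undetectable bit is set.
      return (map_bitfield & (1 << kIsUndetectable)) != 0;
    }

    int main() {
      assert(IsUndetectable(1 << kIsUndetectable));
      assert(!IsUndetectable(0));
      return 0;
    }
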
+// TestNull <src>
+//
+// Test if the value in the <src> register is strictly equal to null.
+void Interpreter::DoTestNull(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+
+ Label equal(assembler), end(assembler);
+ __ GotoIf(__ WordEqual(object, null_value), &equal);
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
+// TestUndefined <src>
+//
+// Test if the value in the <src> register is strictly equal to undefined.
+void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+
+ Label equal(assembler), end(assembler);
+ __ GotoIf(__ WordEqual(object, undefined_value), &equal);
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ Jump(relative_jump);
}
@@ -1869,7 +2370,7 @@ void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
// accumulator contains true.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
@@ -1892,7 +2393,7 @@ void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
@@ -1915,7 +2416,7 @@ void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -1948,7 +2449,7 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -1982,7 +2483,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
@@ -2006,7 +2507,7 @@ void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
@@ -2023,6 +2524,49 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
+// JumpIfJSReceiver <imm>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+
+ Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
+ if_notsmi(assembler);
+ __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+ __ Bind(&if_notsmi);
+ __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+ __ Bind(&if_object);
+ __ Jump(relative_jump);
+
+ __ Bind(&if_notobject);
+ __ Dispatch();
+}
+
+// JumpIfJSReceiverConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
+// the object referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+
+ Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler);
+ __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+ __ Bind(&if_notsmi);
+ __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+
+ __ Bind(&if_object);
+ __ Jump(relative_jump);
+
+ __ Bind(&if_notobject);
+ __ Dispatch();
+}
+
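
Both JumpIfJSReceiver variants share one predicate: not a Smi, and an
instance type in the JSReceiver range. A sketch under the assumption (true in
V8) that JS receiver types are grouped at the top of the instance-type enum;
the numeric values below are illustrative:

    #include <cassert>

    // Hypothetical instance-type values; V8 defines the real ones in
    // objects.h, with JS receiver types grouped at the end of the enum.
    enum InstanceType {
      HEAP_NUMBER_TYPE = 10,
      FIRST_JS_RECEIVER_TYPE = 100,
      JS_OBJECT_TYPE = 105,
    };

    bool IsJSReceiver(bool is_smi, InstanceType instance_type) {
      // Smis are never receivers; otherwise a single range check suffices.
      return !is_smi && instance_type >= FIRST_JS_RECEIVER_TYPE;
    }

    int main() {
      assert(IsJSReceiver(false, JS_OBJECT_TYPE));
      assert(!IsJSReceiver(false, HEAP_NUMBER_TYPE));
      assert(!IsJSReceiver(true, JS_OBJECT_TYPE));
      return 0;
    }
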
// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -2030,7 +2574,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
@@ -2052,7 +2596,7 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* loop_depth = __ BytecodeOperandImm(1);
Node* osr_level = __ LoadOSRNestingLevel();
@@ -2082,14 +2626,13 @@ void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* pattern = __ LoadConstantPoolEntry(index);
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandFlag(2);
- Node* flags = __ SmiTag(flags_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
+ Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = FastCloneRegExpStub::Generate(
- assembler, closure, literal_index, pattern, flags, context);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneRegExp(
+ closure, literal_index, pattern, flags, context);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2099,35 +2642,32 @@ void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
Node* bytecode_flags = __ BytecodeOperandFlag(2);
Label fast_shallow_clone(assembler),
call_runtime(assembler, Label::kDeferred);
- Node* use_fast_shallow_clone = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
- __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+ __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
+ bytecode_flags),
+ &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
DCHECK(FLAG_allocation_site_pretenuring);
- Node* result = FastCloneShallowArrayStub::Generate(
- assembler, closure, literal_index, context, &call_runtime,
- TRACK_ALLOCATION_SITE);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneShallowArray(
+ closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
__ SetAccumulator(result);
__ Dispatch();
}
__ Bind(&call_runtime);
{
- STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
- Node* flags_raw = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+ Node* flags_raw =
+ __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+ bytecode_flags);
Node* flags = __ SmiTag(flags_raw);
Node* index = __ BytecodeOperandIdx(0);
Node* constant_elements = __ LoadConstantPoolEntry(index);
@@ -2144,24 +2684,24 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
Node* bytecode_flags = __ BytecodeOperandFlag(2);
Node* closure = __ LoadRegister(Register::function_closure());
// Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(assembler),
if_not_fast_clone(assembler, Label::kDeferred);
- Node* fast_clone_properties_count =
- __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
- bytecode_flags);
- __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+ Node* fast_clone_properties_count = __ DecodeWordFromWord32<
+ CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
+ __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
+ &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
// If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
- Node* result = FastCloneShallowObjectStub::GenerateFastPath(
- assembler, &if_not_fast_clone, closure, literal_index,
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneShallowObject(
+ &if_not_fast_clone, closure, literal_index,
fast_clone_properties_count);
__ StoreRegister(result, __ BytecodeOperandReg(3));
__ Dispatch();
@@ -2174,10 +2714,9 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Node* constant_elements = __ LoadConstantPoolEntry(index);
Node* context = __ GetContext();
- STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
- Node* flags_raw = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+ Node* flags_raw =
+ __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+ bytecode_flags);
Node* flags = __ SmiTag(flags_raw);
Node* result =
@@ -2189,31 +2728,38 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
}
}
-// CreateClosure <index> <tenured>
+// CreateClosure <index> <slot> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
- Node* flags = __ BytecodeOperandFlag(1);
+ Node* flags = __ BytecodeOperandFlag(2);
Node* context = __ GetContext();
Label call_runtime(assembler, Label::kDeferred);
- Node* fast_new_closure = __ Word32And(
- flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
- __ GotoUnless(fast_new_closure, &call_runtime);
- __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
+ __ GotoUnless(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+ &call_runtime);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* vector_index = __ BytecodeOperandIdx(1);
+ vector_index = __ SmiTag(vector_index);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ SetAccumulator(constructor_assembler.EmitFastNewClosure(
+ shared, type_feedback_vector, vector_index, context));
__ Dispatch();
__ Bind(&call_runtime);
{
- STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
- Node* tenured_raw = __ Word32And(
- flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+ Node* tenured_raw =
+ __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
Node* tenured = __ SmiTag(tenured_raw);
- Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
- shared, tenured);
+ type_feedback_vector = __ LoadTypeFeedbackVector();
+ vector_index = __ BytecodeOperandIdx(1);
+ vector_index = __ SmiTag(vector_index);
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
+ type_feedback_vector, vector_index, tenured);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2259,8 +2805,22 @@ void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
Node* slots = __ BytecodeOperandUImm(0);
Node* context = __ GetContext();
- __ SetAccumulator(
- FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+ closure, slots, context, FUNCTION_SCOPE));
+ __ Dispatch();
+}
+
+// CreateEvalContext <slots>
+//
+// Creates a new context with number of |slots| for an eval closure.
+void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* slots = __ BytecodeOperandUImm(0);
+ Node* context = __ GetContext();
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+ closure, slots, context, EVAL_SCOPE));
__ Dispatch();
}
@@ -2371,6 +2931,22 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
}
}
+// SetPendingMessage
+//
+// Sets the pending message to the value in the accumulator, and returns the
+// previous pending message in the accumulator.
+void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) {
+ Node* pending_message = __ ExternalConstant(
+ ExternalReference::address_of_pending_message_obj(isolate_));
+ Node* previous_message =
+ __ Load(MachineType::TaggedPointer(), pending_message);
+ Node* new_message = __ GetAccumulator();
+ __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
+ new_message);
+ __ SetAccumulator(previous_message);
+ __ Dispatch();
+}
+
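
SetPendingMessage amounts to an exchange between the accumulator and a
per-isolate slot: store the new message, hand back the old one. A toy model
using std::exchange, with a plain global standing in for the external
reference:

    #include <cassert>
    #include <utility>

    // Toy per-isolate pending-message slot; V8 stores a tagged pointer at an
    // external reference rather than a plain global.
    static const char* pending_message = nullptr;

    const char* SetPendingMessage(const char* new_message) {
      // Store the new message and return the previous one, as the bytecode
      // handler does via the accumulator.
      return std::exchange(pending_message, new_message);
    }

    int main() {
      const char* prev = SetPendingMessage("boom");
      assert(prev == nullptr);
      prev = SetPendingMessage(nullptr);
      assert(prev != nullptr && pending_message == nullptr);
      return 0;
    }
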
// Throw
//
// Throws the exception in the accumulator.
@@ -2530,7 +3106,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
- Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+ Node* receiver_map = __ LoadMap(receiver);
__ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
@@ -2643,7 +3219,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
__ Bind(&ok);
Node* array =
- __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+ __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
Node* context = __ GetContext();
Node* state = __ GetAccumulator();
@@ -2660,7 +3236,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
__ Bind(&if_stepping);
{
Node* context = __ GetContext();
- __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+ __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
__ Goto(&ok);
}
}
@@ -2675,7 +3251,7 @@ void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
Node* generator = __ LoadRegister(generator_reg);
__ ImportRegisterFile(
- __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+ __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));
Node* old_state =
__ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index b10ae2e451..04f7e85b39 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -76,6 +76,14 @@ class Interpreter {
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+ typedef void (Interpreter::*BytecodeGeneratorFunc)(InterpreterAssembler*);
+
+ // Generates a handler for the given |bytecode| and |operand_scale| using
+ // |generator| and installs it into the dispatch table.
+ void InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+ OperandScale operand_scale,
+ BytecodeGeneratorFunc generator);
+
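
BytecodeGeneratorFunc is a pointer-to-member-function type, which lets
InstallBytecodeHandler take the handler generator as data rather than
hard-coding a call per bytecode. A minimal sketch of the dispatch pattern
(the Interpreter here is a stand-in, not V8's class):

    #include <cassert>

    // Minimal sketch of dispatching through pointer-to-member functions,
    // the pattern behind BytecodeGeneratorFunc.
    struct Interpreter {
      int handled = 0;
      void DoFoo() { handled = 1; }
      void DoBar() { handled = 2; }
    };

    using GeneratorFunc = void (Interpreter::*)();

    void Install(Interpreter* interpreter, GeneratorFunc generator) {
      // Invoke the member function through the pointer-to-member.
      (interpreter->*generator)();
    }

    int main() {
      Interpreter interpreter;
      Install(&interpreter, &Interpreter::DoFoo);
      assert(interpreter.handled == 1);
      Install(&interpreter, &Interpreter::DoBar);
      assert(interpreter.handled == 2);
      return 0;
    }
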
// Generates code to perform the binary operation via |Generator|.
template <class Generator>
void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
@@ -141,6 +149,7 @@ class Interpreter {
// Generates code to load a global.
compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
+ compiler::Node* name_index,
compiler::Node* feedback_slot,
InterpreterAssembler* assembler);
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
index 62d3a77e02..e6c3b76f28 100644
--- a/deps/v8/src/interpreter/mkpeephole.cc
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -192,6 +192,28 @@ PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
}
}
+ // Fuse LdaNull/LdaUndefined followed by an equality comparison into
+ // TestUndetectable. Testing undetectability is a simple check on the map,
+ // which is more efficient than the full comparison operation.
+ if (last == Bytecode::kLdaNull || last == Bytecode::kLdaUndefined) {
+ if (current == Bytecode::kTestEqual) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestUndetectable};
+ }
+ }
+
+ // Fuse LdaNull/LdaUndefined followed by a strict equals with
+ // TestNull/TestUndefined.
+ if (current == Bytecode::kTestEqualStrict) {
+ if (last == Bytecode::kLdaNull) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestNull};
+ } else if (last == Bytecode::kLdaUndefined) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestUndefined};
+ }
+ }
+
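
Each fusion rule above is a pure pairwise rewrite from (last, current) to a
replacement bytecode plus an action, which is what makes the generated-table
approach work. A sketch of the strict-equality rules as a lookup function,
with a toy Bytecode enum standing in for V8's:

    #include <cassert>

    // Toy model of the peephole rules added above; enum values are
    // illustrative stand-ins for V8's Bytecode and PeepholeAction.
    enum class Bytecode {
      kLdaNull, kLdaUndefined, kTestEqualStrict,
      kTestNull, kTestUndefined, kIllegal,
    };

    Bytecode FuseStrictEquality(Bytecode last, Bytecode current) {
      if (current == Bytecode::kTestEqualStrict) {
        if (last == Bytecode::kLdaNull) return Bytecode::kTestNull;
        if (last == Bytecode::kLdaUndefined) return Bytecode::kTestUndefined;
      }
      return Bytecode::kIllegal;  // no fusion applies
    }

    int main() {
      assert(FuseStrictEquality(Bytecode::kLdaNull,
                                Bytecode::kTestEqualStrict) ==
             Bytecode::kTestNull);
      assert(FuseStrictEquality(Bytecode::kLdaUndefined,
                                Bytecode::kTestEqualStrict) ==
             Bytecode::kTestUndefined);
      return 0;
    }
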
// If there is no last bytecode to optimize against, store the incoming
// bytecode or for jumps emit incoming bytecode immediately.
if (last == Bytecode::kIllegal) {