author    Myles Borins <mylesborins@google.com>  2018-04-10 21:39:51 -0400
committer Myles Borins <mylesborins@google.com>  2018-04-11 13:22:42 -0400
commit    12a1b9b8049462e47181a298120243dc83e81c55 (patch)
tree      8605276308c8b4e3597516961266bae1af57557a /deps/v8/src/builtins
parent    78cd8263354705b767ef8c6a651740efe4931ba0 (diff)
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc               |  129
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc           |  167
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc                   |   16
-rw-r--r--  deps/v8/src/builtins/builtins-arguments-gen.cc         |    2
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc             | 1355
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.h              |  156
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc                 |    7
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc    |   96
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc             |  211
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.h              |   49
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc   |  223
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc    |   39
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc                |   61
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc              |   10
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc       |  405
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc       |  135
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h        |    2
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc        |   19
-rw-r--r--  deps/v8/src/builtins/builtins-dataview.cc              |   88
-rw-r--r--  deps/v8/src/builtins/builtins-date-gen.cc              |   16
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h            |  150
-rw-r--r--  deps/v8/src/builtins/builtins-function.cc              |   20
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc         |   10
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc          |  359
-rw-r--r--  deps/v8/src/builtins/builtins-intl.h                   |    2
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc          |   23
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h           |    6
-rw-r--r--  deps/v8/src/builtins/builtins-math-gen.cc              |    2
-rw-r--r--  deps/v8/src/builtins/builtins-number-gen.cc            |   14
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc            |  332
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc                |   25
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc           | 1755
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h            |   81
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc             |   63
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc            |  371
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h             |   20
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc |   79
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc            |  328
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.cc        | 1278
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.h         |  133
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray.cc            |   70
-rw-r--r--  deps/v8/src/builtins/builtins.cc                       | 1138
-rw-r--r--  deps/v8/src/builtins/builtins.h                        |   22
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.cc        |   83
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.h         |   48
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.cc       |  100
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.h        |   56
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc             |  107
-rw-r--r--  deps/v8/src/builtins/mips/OWNERS                       |    1
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc             |  121
-rw-r--r--  deps/v8/src/builtins/mips64/OWNERS                     |    1
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc         |  128
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc               |  136
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc             |  134
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc        |    4
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc               |  115
56 files changed, 6720 insertions, 3781 deletions
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2b2b9c2b34..1ea0bb733b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -156,13 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -190,6 +183,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ pop(r0);
__ SmiUntag(r0, r0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
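
The static_assert added here, and repeated at every call, jump, and tail-call site below, pins the hard-coded register to the ABI constant: if kJavaScriptCallCodeStartRegister is ever moved off r2, the build fails instead of silently emitting code into the wrong register. A minimal sketch of the pattern, with the register enum and the constant stubbed out as assumptions:

    // Hypothetical stand-ins for V8's register definitions.
    enum Register { r0, r1, r2, r3, r4 };
    constexpr Register kJavaScriptCallCodeStartRegister = r2;

    void GenerateTailCallToReturnedCode() {
      // The emitted code uses r2 directly, so assert that the ABI
      // constant still names r2; any drift becomes a compile error.
      static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
      // ... materialize the code start into r2 and jump through it ...
    }
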
@@ -297,7 +291,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
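
The mask renames in this and later hunks (kDerivedConstructorMask to IsDerivedConstructorBit::kMask, and likewise for the class-constructor bit) reflect compiler hints being expressed as per-flag BitField types, each exposing its own kMask. A rough sketch of that encoding; the shift positions here are assumptions for illustration, not V8's actual layout:

    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr T decode(uint32_t hints) {
        return static_cast<T>((hints & kMask) >> kShift);
      }
    };

    // Assumed bit positions, for illustration only.
    using IsDerivedConstructorBit = BitField<bool, 3, 1>;
    using IsClassConstructorBit = BitField<bool, 4, 1>;

    bool IsDerivedConstructor(uint32_t compiler_hints) {
      // The generated tst instruction tests exactly this mask.
      return (compiler_hints & IsDerivedConstructorBit::kMask) != 0;
    }
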
@@ -417,7 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -559,9 +553,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(scratch);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -828,9 +823,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ add(r2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmp(bytecode, Operand(0x1));
- __ b(hi, &load_size);
+ __ b(hi, &process_bytecode);
__ b(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +877,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ b(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
}
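
The renamed helper folds the caller's old "is this kReturn?" check into the advance routine itself: it skips a Wide or ExtraWide prefix, switches to the matching size table, bails out to if_return for any return bytecode (the real code tests every entry of RETURN_BYTECODE_LIST via the JUMP_IF_EQUAL macro; the sketch below collapses that to one check), and otherwise adds the bytecode's size to the offset. A simplified, non-assembly sketch with invented bytecode values and size tables:

    #include <cstddef>
    #include <cstdint>

    enum class Bytecode : uint8_t { kWide, kExtraWide, kReturn, kAdd };
    // One row per scaling (plain, wide, extra-wide), one entry per bytecode.
    const int kSizeTable[3][4] = {{1, 1, 1, 2}, {1, 1, 1, 3}, {1, 1, 1, 5}};

    // Returns false (the if_return bail-out) when the operation bytecode,
    // after any prefix, is a return; otherwise advances the offset.
    bool AdvanceBytecodeOffsetOrReturn(const uint8_t* array, size_t* offset) {
      int table = 0;  // plain scaling
      Bytecode bytecode = static_cast<Bytecode>(array[*offset]);
      if (bytecode == Bytecode::kWide || bytecode == Bytecode::kExtraWide) {
        table = (bytecode == Bytecode::kWide) ? 1 : 2;
        ++*offset;  // consume the prefix and reload the real operation
        bytecode = static_cast<Bytecode>(array[*offset]);
      }
      if (bytecode == Bytecode::kReturn) return false;
      *offset += kSizeTable[table][static_cast<int>(bytecode)];
      return true;
    }
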
@@ -907,7 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1008,11 +1015,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Call(r4);
+ __ ldr(
+ kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1025,16 +1033,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ b(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
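
The dispatch sequence above is a table-indexed indirect call: load the next bytecode byte, scale it by the pointer size, load the handler address out of the dispatch table, and call through the register that now has an ABI name. A plain-C++ analogue, with the handler table contents assumed:

    #include <cstddef>
    #include <cstdint>

    using Handler = size_t (*)(const uint8_t*, size_t);  // returns next offset

    size_t HandleNop(const uint8_t*, size_t offset) { return offset + 1; }

    struct DispatchTable {
      Handler entries[256];
      DispatchTable() { for (Handler& h : entries) h = HandleNop; }
    };

    size_t Dispatch(const DispatchTable& table, const uint8_t* array,
                    size_t offset) {
      // ldrb r4, [array, offset]             ; load the bytecode
      // ldr  r2, [table, r4, LSL #log2(ptr)] ; index the handler table
      // call r2                              ; kJavaScriptCallCodeStartRegister
      return table.entries[array[offset]](array, offset);
    }
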
@@ -1215,13 +1220,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Jump(scratch);
+ __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
+ kPointerSizeLog2));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1237,14 +1243,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
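
Since the advance path in Generate_InterpreterEnterBytecodeAdvance is only ever entered for non-return bytecodes, the new if_return label is wired to an Abort rather than left dangling, so a logic error dies loudly instead of falling through. The guard, in plain C++ terms:

    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] void AbortWith(const char* reason) {
      std::fprintf(stderr, "abort: %s\n", reason);  // AbortReason analogue
      std::abort();
    }

    void EnterBytecodeAdvance(bool is_return_bytecode) {
      if (is_return_bytecode) {
        AbortWith("kInvalidBytecodeAdvance");  // should never be reached
      }
      // ... save the new offset and re-enter bytecode dispatch ...
    }
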
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1262,7 +1274,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1275,7 +1287,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1304,7 +1320,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
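
Every kFeedbackVectorOffset to kFeedbackCellOffset change in this patch is paired with a second load through Cell::kValueOffset, because the closure now references a FeedbackCell whose value slot holds the FeedbackVector. A struct-level sketch of the new shape, with types simplified from the real heap layout:

    struct FeedbackVector { int invocation_count; /* ...slots... */ };
    struct FeedbackCell { FeedbackVector* value; };      // Cell::kValueOffset
    struct JSFunction { FeedbackCell* feedback_cell; };  // kFeedbackCellOffset

    FeedbackVector* LoadFeedbackVector(const JSFunction* closure) {
      // Two dependent loads, mirroring the two ldr instructions above.
      return closure->feedback_cell->value;
    }
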
@@ -1491,9 +1507,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
namespace {
@@ -1978,7 +1995,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2449,9 +2466,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0 : expected number of arguments
// r1 : function (passed through to callee)
// r3 : new target (passed through to callee)
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2464,9 +2482,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index dd92af89bb..54d2524d6e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -180,6 +173,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(x0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
}
@@ -332,7 +326,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kDerivedConstructorMask,
+ __ TestAndBranchIfAnySet(w4,
+ SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -460,11 +455,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ TestAndBranchIfAllClear(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
} else {
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
__ CallRuntime(
Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
@@ -552,7 +547,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -617,9 +612,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(x3, x1);
__ Move(x1, x4);
- __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x5, x5, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x5);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -663,7 +659,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
+ __ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
@@ -745,7 +741,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Poke the result into the stack.
__ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(lt, &loop);
__ Bind(&done);
@@ -920,9 +916,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Add(x2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -936,10 +933,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -949,11 +949,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Cmp(bytecode, Operand(0x1));
- __ B(hi, &load_size);
+ __ B(hi, &process_bytecode);
__ B(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +961,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
+ __ B(&process_bytecode);
__ Bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +970,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ Bind(&load_size);
+ __ Bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ B(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
}
@@ -998,7 +1006,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1009,7 +1017,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1022,7 +1030,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
@@ -1060,7 +1068,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ Sub(x10, __ StackPointer(), Operand(x11));
+ __ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1101,11 +1109,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Call(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1118,16 +1127,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ B(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &do_return);
__ B(&do_dispatch);
__ bind(&do_return);
@@ -1336,11 +1342,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Jump(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1356,14 +1363,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1381,7 +1394,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1394,7 +1407,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1423,7 +1440,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1613,7 +1630,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
+ __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1646,7 +1663,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, __ StackPointer(), frame_size);
+ __ Add(fp, sp, frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1682,7 +1699,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
// Restore fp, lr.
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Call builtin.
@@ -2090,8 +2107,7 @@ void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
__ Push(x11, padreg);
- __ Add(fp, __ StackPointer(),
- ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2101,7 +2117,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2194,7 +2210,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2341,7 +2357,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2467,7 +2483,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2539,8 +2555,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, __ StackPointer(),
- Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
__ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
@@ -2825,7 +2840,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x10;
Label dont_adapt_arguments, stack_overflow;
@@ -2854,7 +2868,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, __ StackPointer());
+ __ Mov(copy_to, sp);
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2918,8 +2932,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2927,9 +2940,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x0 : expected number of arguments
// x1 : function (passed through to callee)
// x3 : new target (passed through to callee)
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2941,9 +2955,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d50e045069..971fb7c678 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -98,18 +98,13 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
if (!raw_call_data->IsUndefined(isolate)) {
DCHECK(raw_call_data->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
*new_target, &args[0] - 1,
args.length() - 1);
-
- Handle<Object> result = custom.Call(callback);
+ Handle<Object> result = custom.Call(call_data);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) {
@@ -154,7 +149,7 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable {
virtual inline void IterateInstance(RootVisitor* v) {
if (length() == 0) return;
- v->VisitRootPointers(Root::kRelocatable, lowest_address(),
+ v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(),
highest_address() + 1);
}
@@ -256,12 +251,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Object* handler =
constructor->shared()->get_api_func_data()->instance_call_handler();
DCHECK(!handler->IsUndefined(isolate));
- // TODO(ishell): remove this debugging code.
- CHECK(handler->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
@@ -272,7 +262,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
obj, new_target, &args[0] - 1,
args.length() - 1);
- Handle<Object> result_handle = custom.Call(callback);
+ Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = isolate->heap()->undefined_value();
} else {
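
Both hunks in builtins-api.cc make the same refactor: rather than every caller pulling the raw v8::FunctionCallback out of the CallHandlerInfo with ToCData, the CallHandlerInfo itself is passed to FunctionCallbackArguments::Call, which now owns the extraction. A shape-only sketch with simplified stand-in types, not V8's real signatures:

    #include <functional>

    struct CallHandlerInfo {
      std::function<int(int)> callback;  // stands in for v8::FunctionCallback
    };

    struct FunctionCallbackArguments {
      int arg;
      // After the refactor: callers hand over the handler info and the
      // callback is unpacked here, in one place.
      int Call(const CallHandlerInfo& info) { return info.callback(arg); }
    };

    int Invoke(FunctionCallbackArguments& custom, const CallHandlerInfo& data) {
      return custom.Call(data);  // was: custom.Call(extracted_callback)
    }
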
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 7db8b971d7..2bf5e1c343 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -44,7 +44,7 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
Node* formal_parameter_count =
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
MachineType::Int32());
- formal_parameter_count = Word32ToParameter(formal_parameter_count, mode);
+ formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 027baa2873..52a6222882 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -2,53 +2,50 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
+#include "src/builtins/builtins-typedarray-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/factory-inl.h"
#include "src/frame-constants.h"
+#include "src/builtins/builtins-array-gen.h"
+
namespace v8 {
namespace internal {
-class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
- public:
- explicit ArrayBuiltinCodeStubAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state),
- k_(this, MachineRepresentation::kTagged),
- a_(this, MachineRepresentation::kTagged),
- to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
- fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- BuiltinResultGenerator;
-
- typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
- Node* k_value, Node* k)>
- CallResultProcessor;
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- PostLoopAction;
+using Node = compiler::Node;
- enum class MissingPropertyMode { kSkip, kUseUndefined };
+ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ k_(this, MachineRepresentation::kTagged),
+ a_(this, MachineRepresentation::kTagged),
+ to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
+ fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
- void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
+void ArrayBuiltinsAssembler::FindResultGenerator() {
+ a_.Bind(UndefinedConstant());
+}
- Node* FindProcessor(Node* k_value, Node* k) {
- Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
- this_arg(), k_value, k, o());
- Label false_continue(this), return_true(this);
- BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
- BIND(&return_true);
- ReturnFromBuiltin(k_value);
- BIND(&false_continue);
- return a();
+Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
+ Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
+ this_arg(), k_value, k, o());
+ Label false_continue(this), return_true(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
+ BIND(&return_true);
+ ReturnFromBuiltin(k_value);
+ BIND(&false_continue);
+ return a();
}
- void FindIndexResultGenerator() { a_.Bind(SmiConstant(-1)); }
+ void ArrayBuiltinsAssembler::FindIndexResultGenerator() {
+ a_.Bind(SmiConstant(-1));
+ }
- Node* FindIndexProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FindIndexProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -59,17 +56,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
+ void ArrayBuiltinsAssembler::ForEachResultGenerator() {
+ a_.Bind(UndefinedConstant());
+ }
- Node* ForEachProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ForEachProcessor(Node* k_value, Node* k) {
CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
k_value, k, o());
return a();
}
- void SomeResultGenerator() { a_.Bind(FalseConstant()); }
+ void ArrayBuiltinsAssembler::SomeResultGenerator() {
+ a_.Bind(FalseConstant());
+ }
- Node* SomeProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SomeProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -80,9 +81,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void EveryResultGenerator() { a_.Bind(TrueConstant()); }
+ void ArrayBuiltinsAssembler::EveryResultGenerator() {
+ a_.Bind(TrueConstant());
+ }
- Node* EveryProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::EveryProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label true_continue(this), return_false(this);
@@ -93,9 +96,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ReduceResultGenerator() { return a_.Bind(this_arg()); }
+ void ArrayBuiltinsAssembler::ReduceResultGenerator() {
+ return a_.Bind(this_arg());
+ }
- Node* ReduceProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ReduceProcessor(Node* k_value, Node* k) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, {&result}), initial(this);
GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
@@ -111,21 +116,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void ReducePostLoopAction() {
+ void ArrayBuiltinsAssembler::ReducePostLoopAction() {
Label ok(this);
GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
BIND(&ok);
}
- void FilterResultGenerator() {
+ void ArrayBuiltinsAssembler::FilterResultGenerator() {
// 7. Let A be ArraySpeciesCreate(O, 0).
// This version of ArraySpeciesCreate will create with the correct
// ElementsKind in the fast case.
- ArraySpeciesCreate();
+ GenerateArraySpeciesCreate();
}
- Node* FilterProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FilterProcessor(Node* k_value, Node* k) {
// ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -191,11 +196,19 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void MapResultGenerator() { ArraySpeciesCreate(len_); }
+ void ArrayBuiltinsAssembler::MapResultGenerator() {
+ GenerateArraySpeciesCreate(len_);
+ }
- void TypedArrayMapResultGenerator() {
+ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
- Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
+ TNode<JSTypedArray> original_array = CAST(o());
+ TNode<Smi> length = CAST(len_);
+ const char* method_name = "%TypedArray%.prototype.map";
+
+ TypedArrayBuiltinsAssembler typedarray_asm(state());
+ TNode<JSTypedArray> a = typedarray_asm.SpeciesCreateByLength(
+ context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
CSA_ASSERT(this,
@@ -206,7 +219,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
a_.Bind(a);
}
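
This hunk is the payoff of moving the class into builtins-array-gen.h (see the diffstat): the map builtin instantiates a TypedArrayBuiltinsAssembler over the same CodeAssemblerState to reuse SpeciesCreateByLength rather than keeping a private copy. A minimal sketch of two assemblers emitting into one shared state; the types are simplified stand-ins:

    struct CodeAssemblerState { /* shared instruction stream */ };

    struct CodeStubAssembler {
      explicit CodeStubAssembler(CodeAssemblerState* state) : state_(state) {}
      CodeAssemblerState* state() { return state_; }
      CodeAssemblerState* state_;
    };

    struct TypedArrayBuiltinsAssembler : CodeStubAssembler {
      using CodeStubAssembler::CodeStubAssembler;
      void SpeciesCreateByLength() { /* emits into the shared state */ }
    };

    struct ArrayBuiltinsAssembler : CodeStubAssembler {
      using CodeStubAssembler::CodeStubAssembler;
      void TypedArrayMapResultGenerator() {
        TypedArrayBuiltinsAssembler typedarray_asm(state());  // same stream
        typedarray_asm.SpeciesCreateByLength();
      }
    };
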
- Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SpecCompliantMapProcessor(Node* k_value,
+ Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// SpecCompliantMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -218,7 +232,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- Node* FastMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FastMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// FastMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -312,7 +326,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
- Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -325,11 +339,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Branch(fast_typed_array_target_, &fast, &slow);
BIND(&fast);
- // #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
- Node* num_value = ToNumber(context(), mapped_value);
+ // #sec-integerindexedelementset
+ // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // numValue be ? ToBigInt(v).
+ // 6. Otherwise, let numValue be ? ToNumber(value).
+ Node* num_value;
+ if (source_elements_kind_ == BIGINT64_ELEMENTS ||
+ source_elements_kind_ == BIGUINT64_ELEMENTS) {
+ num_value = ToBigInt(context(), mapped_value);
+ } else {
+ num_value = ToNumber_Inline(context(), mapped_value);
+ }
// The only way this can bail out is because of a detached buffer.
EmitElementStore(a(), k, num_value, false, source_elements_kind_,
- KeyedAccessStoreMode::STANDARD_STORE, &detached);
+ KeyedAccessStoreMode::STANDARD_STORE, &detached,
+ context());
Goto(&done);
BIND(&slow);
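
The new num_value branch encodes the updated IntegerIndexedElementSet steps quoted in the comment: BigInt-backed typed arrays convert the mapped value with ToBigInt, every other kind with ToNumber. The choice depends only on the source elements kind, as in this sketch (the kind list is abbreviated):

    enum ElementsKind {
      INT32_ELEMENTS,
      FLOAT64_ELEMENTS,
      BIGINT64_ELEMENTS,
      BIGUINT64_ELEMENTS,
    };

    // IntegerIndexedElementSet step 5 (ToBigInt) vs. step 6 (ToNumber).
    bool UsesToBigInt(ElementsKind kind) {
      return kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS;
    }
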
@@ -339,28 +363,16 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&detached);
// tc39.github.io/ecma262/#sec-integerindexedelementset
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&done);
return a();
}
- void NullPostLoopAction() {}
+ void ArrayBuiltinsAssembler::NullPostLoopAction() {}
- protected:
- Node* context() { return context_; }
- Node* receiver() { return receiver_; }
- Node* new_target() { return new_target_; }
- Node* argc() { return argc_; }
- Node* o() { return o_; }
- Node* len() { return len_; }
- Node* callbackfn() { return callbackfn_; }
- Node* this_arg() { return this_arg_; }
- Node* k() { return k_.value(); }
- Node* a() { return a_.value(); }
-
- void ReturnFromBuiltin(Node* value) {
+ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
if (argc_ == nullptr) {
Return(value);
} else {
@@ -370,9 +382,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
- Node* callbackfn, Node* this_arg,
- Node* new_target, Node* argc) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target, TNode<IntPtrT> argc) {
context_ = context;
receiver_ = receiver;
new_target_ = new_target;
@@ -381,12 +393,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
argc_ = argc;
}
- void GenerateIteratingArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label non_array(this), array_changes(this, {&k_, &a_, &to_});
// TODO(danno): Seriously? Do we really need to throw the exact error
@@ -398,7 +409,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = CallBuiltin(Builtins::kToObject, context(), receiver());
+ o_ = ToObject(context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -453,15 +464,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
ReturnFromBuiltin(result);
}
- void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
- Node* callbackfn,
- Node* this_arg, Node* a,
- Node* o, Node* initial_k,
- Node* len, Node* to) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to) {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
- argc_ = nullptr;
a_.Bind(a);
k_.Bind(initial_k);
o_ = o;
@@ -469,10 +477,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
to_.Bind(to);
}
- void GenerateIteratingTypedArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
- ForEachDirection direction = ForEachDirection::kForward) {
+ ForEachDirection direction) {
name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -525,6 +533,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&distinguish_types);
+ generator(this);
+
if (direction == ForEachDirection::kForward) {
k_.Bind(SmiConstant(0));
} else {
@@ -539,7 +549,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label done(this);
source_elements_kind_ = ElementsKindForInstanceType(
static_cast<InstanceType>(instance_types[i]));
- generator(this);
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
@@ -552,10 +561,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void GenerateIteratingArrayBuiltinLoopContinuation(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinLoopContinuation(
const CallResultProcessor& processor, const PostLoopAction& action,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label loop(this, {&k_, &a_, &to_});
Label after_loop(this);
Goto(&loop);
@@ -613,8 +621,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Return(a_.value());
}
- private:
- static ElementsKind ElementsKindForInstanceType(InstanceType type) {
+ ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
+ InstanceType type) {
switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
@@ -628,9 +636,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void VisitAllTypedArrayElements(Node* array_buffer,
- const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction) {
+ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
+ Node* array_buffer, const CallResultProcessor& processor, Label* detached,
+ ForEachDirection direction) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
@@ -660,11 +668,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
advance_mode);
}
- void VisitAllFastElementsOneKind(ElementsKind kind,
- const CallResultProcessor& processor,
- Label* array_changed, ParameterMode mode,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
+ ElementsKind kind, const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
@@ -735,10 +742,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Comment("end VisitAllFastElementsOneKind");
}
- void HandleFastElements(const CallResultProcessor& processor,
- const PostLoopAction& action, Label* slow,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::HandleFastElements(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ Label* slow, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Label switch_on_elements_kind(this), fast_elements(this),
maybe_double_elements(this), fast_double_elements(this);
@@ -788,7 +795,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
// This version is specialized to create a zero length array
// of the elements kind of the input array.
- void ArraySpeciesCreate() {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate() {
Label runtime(this, Label::kDeferred), done(this);
TNode<Smi> len = SmiConstant(0);
@@ -810,9 +817,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Respect the ElementsKind of the input array.
TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
TNode<JSArray> array =
CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
nullptr, CodeStubAssembler::SMI_PARAMETERS));
@@ -834,7 +841,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArraySpeciesCreate(SloppyTNode<Smi> len) {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(
+ SloppyTNode<Smi> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
@@ -860,9 +868,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
@@ -881,30 +889,14 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&done);
}
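Both GenerateArraySpeciesCreate variants above share the same fast/slow split. A hedged TypeScript sketch of the observable behavior, with the protector state reduced to a boolean parameter (the real fast-path condition is a set of map checks plus the Array species protector, only partly visible in these hunks):

    // Sketch only; `speciesIntact` models V8's Array species protector.
    function arraySpeciesCreateSketch(original: unknown, len: number,
                                      speciesIntact: boolean): unknown {
      if (Array.isArray(original) && speciesIntact) {
        return new Array(len); // fast path: allocate a plain array directly
      }
      // Slow path: honor a redefined constructor / @@species in the runtime.
      const ctor = (original as any)?.constructor?.[Symbol.species] ?? Array;
      return new ctor(len);
    }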
- Node* callbackfn_ = nullptr;
- Node* o_ = nullptr;
- Node* this_arg_ = nullptr;
- Node* len_ = nullptr;
- Node* context_ = nullptr;
- Node* receiver_ = nullptr;
- Node* new_target_ = nullptr;
- Node* argc_ = nullptr;
- Node* fast_typed_array_target_ = nullptr;
- const char* name_ = nullptr;
- Variable k_;
- Variable a_;
- Variable to_;
- Label fully_spec_compliant_;
- ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
-};
-
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -920,16 +912,18 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -945,10 +939,10 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
capacity),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(Int32LessThanOrEqual(elements_kind,
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
@@ -1008,12 +1002,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSArray> array_receiver;
Node* kind = nullptr;
Label fast(this);
@@ -1021,13 +1017,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&fast);
{
+ array_receiver = CAST(receiver);
arg_index = IntPtrConstant(0);
- kind = EnsureArrayPushable(receiver, &runtime);
+ kind = EnsureArrayPushable(array_receiver, &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, receiver, &args,
- &arg_index, &smi_transition);
+ Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver,
+ &args, &arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -1037,17 +1034,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// the most generic implementation for the rest of the array.
BIND(&smi_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIf(TaggedIsSmi(arg), &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible;
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1065,16 +1062,16 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, receiver, &args,
- &arg_index, &default_label);
+ Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, array_receiver,
+ &args, &arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
Node* new_length =
- BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, receiver, &args, &arg_index,
- &double_transition);
+ BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, array_receiver, &args,
+ &arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -1084,17 +1081,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// on the most generic implementation for the rest of the array.
BIND(&double_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIfNumber(arg, &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible;
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1107,13 +1104,13 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&default_label);
{
args.ForEach(
- [this, receiver, context](Node* arg) {
- Node* length = LoadJSArrayLength(receiver);
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(LanguageMode::kStrict));
+ [this, array_receiver, context](Node* arg) {
+ Node* length = LoadJSArrayLength(array_receiver);
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length,
+ arg, SmiConstant(LanguageMode::kStrict));
},
- arg_index);
- args.PopAndReturn(LoadJSArrayLength(receiver));
+ arg_index.value());
+ args.PopAndReturn(LoadJSArrayLength(array_receiver));
}
BIND(&runtime);
@@ -1131,8 +1128,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
- Label* slow) {
+ Node* HandleFastSlice(TNode<Context> context, Node* array, Node* from,
+ Node* count, Label* slow) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this);
@@ -1262,7 +1259,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
+ void CopyOneElement(TNode<Context> context, Node* o, Node* a, Node* p_k,
+ Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
@@ -1291,9 +1289,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
- VARIABLE(o, MachineRepresentation::kTagged);
+ TVARIABLE(JSReceiver, o);
VARIABLE(len, MachineRepresentation::kTagged);
Label length_done(this), generic_length(this), check_arguments_length(this),
load_arguments_length(this);
@@ -1301,8 +1299,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
GotoIf(TaggedIsSmi(receiver), &generic_length);
GotoIfNot(IsJSArray(receiver), &check_arguments_length);
- o.Bind(receiver);
- len.Bind(LoadJSArrayLength(receiver));
+ TNode<JSArray> array_receiver = CAST(receiver);
+ o = array_receiver;
+ len.Bind(LoadJSArrayLength(array_receiver));
// Check for the array clone case. There can be no arguments to slice, the
// array prototype chain must be intact and have no elements, the array has to
@@ -1318,7 +1317,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&check_arguments_length);
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* native_context = LoadNativeContext(context);
GotoIfContextElementEqual(map, native_context,
Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
@@ -1337,16 +1336,16 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&load_arguments_length);
Node* arguments_length =
- LoadObjectField(receiver, JSArgumentsObject::kLengthOffset);
+ LoadObjectField(array_receiver, JSArgumentsObject::kLengthOffset);
GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
- o.Bind(receiver);
+ o = CAST(receiver);
len.Bind(arguments_length);
Goto(&length_done);
BIND(&generic_length);
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O).
- o.Bind(CallBuiltin(Builtins::kToObject, context, receiver));
+ o = ToObject(context, receiver);
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -1359,7 +1358,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 5. Let relativeStart be ToInteger(start).
// 6. ReturnIfAbrupt(relativeStart).
- TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0)));
+ TNode<Object> arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
Node* relative_start = ToInteger_Inline(context, arg0);
// 7. If relativeStart < 0, let k be max((len + relativeStart),0);
@@ -1378,8 +1377,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 8. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ToInteger(end).
// 9. ReturnIfAbrupt(relativeEnd).
- TNode<Object> end =
- CAST(args.GetOptionalArgumentValue(1, UndefinedConstant()));
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
Label end_undefined(this), end_done(this);
VARIABLE(relative_end, MachineRepresentation::kTagged);
GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
@@ -1460,12 +1458,13 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
}
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -1482,17 +1481,19 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -1514,10 +1515,10 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
IntPtrConstant(JSArray::kMaxCopyElements)),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
&fast_elements_smi);
@@ -1616,9 +1617,9 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
}
}
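The pop and shift fast paths above bail out under the same guards before mutating the array in place. A hedged TypeScript sketch of that gatekeeping, where each field stands in for a CSA check (EnsureArrayLengthWritable, the copy-on-write map comparison, and so on):

    interface FastArrayState {
      length: number;
      lengthWritable: boolean;      // 2) length must be writable
      elementsCopyOnWrite: boolean; // 3) backing store must not be COW
    }

    // true: the in-place fast path may run; false: return undefined for the
    // empty case or fall through to the runtime implementation.
    function canUseFastPopOrShift(a: FastArrayState): boolean {
      if (a.length === 0) return false;
      if (!a.lengthWritable) return false;
      if (a.elementsCopyOnWrite) return false;
      return true;
    }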
-TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
+TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
ParameterMode mode = OptimalParameterMode();
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);
@@ -1629,8 +1630,8 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(ExtractFastJSArray(context, array, begin, count, mode));
}
-TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
CSA_ASSERT(this, IsJSArray(array));
@@ -1640,9 +1641,9 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(CloneFastJSArray(context, array, mode));
}
-TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1656,16 +1657,16 @@ TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
// Continuation that is called after an eager deoptimization from TF (e.g. the
// array changes during iteration).
-TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1678,9 +1679,9 @@ TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// Continuation that is called after a lazy deoptimization from TF (e.g. the
// callback function is no longer callable).
-TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1695,9 +1696,9 @@ TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// right after the callback, and its return value must be handled before
// iteration continues.
TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1720,13 +1721,13 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
}
// ES #sec-get-%typedarray%.prototype.find
-TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1734,17 +1735,16 @@ TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.find", &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1758,15 +1758,14 @@ TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1777,10 +1776,9 @@ TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1792,9 +1790,9 @@ TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
}
TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1817,13 +1815,13 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
}
// ES #sec-get-%typedarray%.prototype.findIndex
-TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1832,22 +1830,393 @@ TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayFindIndexLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
+class ArrayPopulatorAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayPopulatorAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(
+ ConstructJS(CodeFactory::Construct(isolate()), context, receiver));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map,
+ SmiConstant(0), SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Number> length) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ CSA_ASSERT(this, IsNumberNormalized(length));
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ receiver, length));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ Label next(this), runtime(this, Label::kDeferred);
+ TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
+ length, SmiConstant(0), ok, not_ok);
+ });
+ // This check also transitively covers the case where length is too big
+ // to be representable by a SMI and so is not usable with
+ // AllocateJSArray.
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
+ limit, &runtime, &next);
+
+ BIND(&runtime);
+ {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> array_function = CAST(
+ LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
+ array = CallRuntime(Runtime::kNewArray, context, array_function, length,
+ array_function, UndefinedConstant());
+ Goto(&done);
+ }
+
+ BIND(&next);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ // TODO(delphick): Consider using
+ // AllocateUninitializedJSArrayWithElements to avoid initializing an
+ // array and then writing over it.
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
+ SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
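Both ConstructArrayLike overloads make the same spec-level decision: construct through the receiver when it is a constructor, otherwise fall back to a plain array (via the runtime when the length is not a small, in-range Smi). A hedged TypeScript sketch, with plain arrays standing in for the allocation paths:

    // Sketch; `isConstructor` abstracts the IsConstructor map-bit check.
    function constructArrayLike(receiver: unknown, length: number,
                                isConstructor: (v: unknown) => boolean): object {
      if (isConstructor(receiver)) {
        // Mirrors ConstructJS(CodeFactory::Construct(...), receiver, length).
        return Reflect.construct(receiver as new (n: number) => object, [length]);
      }
      // Mirrors AllocateJSArray, or Runtime::kNewArray for large lengths.
      return new Array(length);
    }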
+ void GenerateSetLength(TNode<Context> context, TNode<Object> array,
+ TNode<Number> length) {
+ Label fast(this), runtime(this), done(this);
+ // TODO(delphick): We should be able to skip the fast set altogether if the
+ // length already equals the expected length, which it now always does on
+ // the fast path.
+ // Only set the length in this stub if
+ // 1) the array has fast elements,
+ // 2) the length is writable,
+ // 3) the new length is equal to the old length.
+
+ // 1) Check that the array has fast elements.
+ // TODO(delphick): Consider changing this since it does an unnecessary
+ // check for SMIs.
+ // TODO(delphick): Also we could hoist this to after the array construction
+ // and copy the args into array in the same way as the Array constructor.
+ BranchIfFastJSArray(array, context, &fast, &runtime);
+
+ BIND(&fast);
+ {
+ TNode<JSArray> fast_array = CAST(array);
+
+ TNode<Smi> length_smi = CAST(length);
+ TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+
+ // 2) Ensure that the length is writable.
+ // TODO(delphick): This check may be redundant due to the
+ // BranchIfFastJSArray above.
+ EnsureArrayLengthWritable(LoadMap(fast_array), &runtime);
+
+ // 3) If the created array's length does not match the required length,
+ // then use the runtime to set the property as that will insert holes
+ // into excess elements or shrink the backing store as appropriate.
+ GotoIf(SmiNotEqual(length_smi, old_length), &runtime);
+
+ StoreObjectFieldNoWriteBarrier(fast_array, JSArray::kLengthOffset,
+ length_smi);
+
+ Goto(&done);
+ }
+
+ BIND(&runtime);
+ {
+ CallRuntime(Runtime::kSetProperty, context, static_cast<Node*>(array),
+ CodeStubAssembler::LengthStringConstant(), length,
+ SmiConstant(LanguageMode::kStrict));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ }
+};
+
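GenerateSetLength stores the length directly only when all three numbered conditions hold; everything else routes through Runtime::kSetProperty so holes are inserted or the store is shrunk correctly. A hedged TypeScript sketch of that branch structure:

    // Sketch; `setPropertyRuntime` stands in for Runtime::kSetProperty.
    function generateSetLengthSketch(
        array: { length: number; fast: boolean; lengthWritable: boolean },
        newLength: number,
        setPropertyRuntime: (a: object, len: number) => void): void {
      if (array.fast && array.lengthWritable && array.length === newLength) {
        array.length = newLength; // plain field store, no write barrier
        return;
      }
      setPropertyRuntime(array, newLength); // handles holes and shrinking
    }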
+// ES #sec-array.from
+TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+
+ TNode<Object> map_function = args.GetOptionalArgumentValue(1);
+
+ // If map_function is not undefined, ensure it is callable; otherwise throw.
+ {
+ Label no_error(this), error(this);
+ GotoIf(IsUndefined(map_function), &no_error);
+ GotoIf(TaggedIsSmi(map_function), &error);
+ Branch(IsCallable(map_function), &no_error, &error);
+
+ BIND(&error);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_function);
+
+ BIND(&no_error);
+ }
+
+ Label iterable(this), not_iterable(this), finished(this), if_exception(this);
+
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+ TNode<Object> items = args.GetOptionalArgumentValue(0);
+ // The spec doesn't require ToObject to be called directly in the iterable
+ // branch, but it is part of GetMethod, which is in the spec.
+ TNode<JSReceiver> array_like = ToObject(context, items);
+
+ TVARIABLE(Object, array);
+ TVARIABLE(Number, length);
+
+ // Determine whether items[Symbol.iterator] is defined:
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ Node* iterator_method =
+ iterator_assembler.GetIteratorMethod(context, array_like);
+ Branch(IsNullOrUndefined(iterator_method), &not_iterable, &iterable);
+
+ BIND(&iterable);
+ {
+ TVARIABLE(Number, index, SmiConstant(0));
+ TVARIABLE(Object, var_exception);
+ Label loop(this, &index), loop_done(this),
+ on_exception(this, Label::kDeferred),
+ index_overflow(this, Label::kDeferred);
+
+ // Check that the method is callable.
+ {
+ Label get_method_not_callable(this, Label::kDeferred), next(this);
+ GotoIf(TaggedIsSmi(iterator_method), &get_method_not_callable);
+ GotoIfNot(IsCallable(iterator_method), &get_method_not_callable);
+ Goto(&next);
+
+ BIND(&get_method_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable,
+ iterator_method);
+
+ BIND(&next);
+ }
+
+ // Construct the output array with zero length.
+ array = ConstructArrayLike(context, args.GetReceiver());
+
+ // Actually get the iterator and throw if the iterator method does not yield
+ // one.
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, items, iterator_method);
+
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ // Loop while iterator is not done.
+ TNode<Object> next = CAST(iterator_assembler.IteratorStep(
+ context, iterator_record, &loop_done, fast_iterator_result_map));
+ TVARIABLE(Object, value,
+ CAST(iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map)));
+
+ // If a map_function is supplied, call it (using this_arg as
+ // receiver) on the value returned from the iterator. Exceptions are
+ // caught so the iterator can be closed.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ Node* v = CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value());
+ GotoIfException(v, &on_exception, &var_exception);
+ value = CAST(v);
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object (catching any exceptions so the
+ // iterator can be closed).
+ Node* define_status =
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ GotoIfException(define_status, &on_exception, &var_exception);
+
+ index = NumberInc(index.value());
+
+ // The spec requires that we throw an exception if index reaches 2^53-1,
+ // but an empty loop would take >100 days to do this many iterations. To
+ // actually run for that long would require an iterator that never set
+ // done to true and a target array which somehow never ran out of memory,
+ // e.g. a proxy that discarded the values. Ignoring this case just means
+ // we would repeatedly call CreateDataProperty with index = 2^53.
+ CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ NumberConstant(kMaxSafeInteger), ok,
+ not_ok);
+ });
+ Goto(&loop);
+ }
+
+ BIND(&loop_done);
+ {
+ length = index;
+ Goto(&finished);
+ }
+
+ BIND(&on_exception);
+ {
+ // Close the iterator, rethrowing either the passed exception or
+ // exceptions thrown during the close.
+ iterator_assembler.IteratorCloseOnException(context, iterator_record,
+ &var_exception);
+ }
+ }
+
+ // Since there's no iterator, items cannot be a Fast JS Array.
+ BIND(&not_iterable);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(IsFastJSArray(array_like, context)));
+
+ // Treat array_like as an array and try to get its length.
+ length = CAST(ToLength_Inline(
+ context, GetProperty(context, array_like, factory()->length_string())));
+
+ // Construct an array using the receiver as constructor with the same length
+ // as the input array.
+ array = ConstructArrayLike(context, args.GetReceiver(), length.value());
+
+ TVARIABLE(Number, index, SmiConstant(0));
+
+ GotoIf(SmiEqual(length.value(), SmiConstant(0)), &finished);
+
+ // Loop from 0 to length-1.
+ {
+ Label loop(this, &index);
+ Goto(&loop);
+ BIND(&loop);
+ TVARIABLE(Object, value);
+
+ value = GetProperty(context, array_like, index.value());
+
+ // If a map_function is supplied, call it (using this_arg as
+ // receiver) on the value retrieved from the array.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value()));
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object.
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ index = NumberInc(index.value());
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ length.value(), &loop, &finished);
+ }
+ }
+
+ BIND(&finished);
+
+ // Finally set the length on the output and return it.
+ GenerateSetLength(context, array.value(), length.value());
+ args.PopAndReturn(array.value());
+}
+
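As a behavioral summary of the builtin above, here is a hedged TypeScript sketch of Array.from's two paths; iterator closing on exception, the fast iterator-result map, and the 2^53-1 index guard are deliberately elided:

    function arrayFromSketch<T>(items: Iterable<T> | ArrayLike<T>,
                                mapFn?: (this: unknown, v: T, i: number) => unknown,
                                thisArg?: unknown): unknown[] {
      if (mapFn !== undefined && typeof mapFn !== "function") {
        throw new TypeError("mapFn is not callable"); // kCalledNonCallable
      }
      const out: unknown[] = []; // ConstructArrayLike(receiver) stand-in
      if ((items as any)[Symbol.iterator] != null) {
        // Iterable path: the length is only known once the iterator is done.
        let i = 0;
        for (const v of items as Iterable<T>) {
          out[i] = mapFn ? mapFn.call(thisArg, v, i) : v; // CreateDataProperty
          i++;
        }
      } else {
        // Array-like path: read "length" up front (ToLength elided), then index.
        const len = (items as ArrayLike<T>).length;
        for (let i = 0; i < len; i++) {
          const v = (items as ArrayLike<T>)[i];
          out[i] = mapFn ? mapFn.call(thisArg, v, i) : v;
        }
      }
      return out; // GenerateSetLength: out.length already matches here
    }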
+// ES #sec-array.of
+TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Smi> length = SmiFromInt32(argc);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
+
+ TNode<Object> array = ConstructArrayLike(context, args.GetReceiver(), length);
+
+ // TODO(delphick): Avoid using CreateDataProperty on the fast path.
+ BuildFastLoop(SmiConstant(0), length,
+ [=](Node* index) {
+ CallRuntime(
+ Runtime::kCreateDataProperty, context,
+ static_cast<Node*>(array), index,
+ args.AtIndex(index, ParameterMode::SMI_PARAMETERS));
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ GenerateSetLength(context, array, length);
+ args.PopAndReturn(array);
+}
+
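Array.of above is the simpler sibling: every argument becomes an element through the same ConstructArrayLike/GenerateSetLength pair. A minimal TypeScript sketch:

    // Sketch; plain stores model Runtime::kCreateDataProperty per argument.
    function arrayOfSketch(...args: unknown[]): unknown[] {
      const out = new Array(args.length); // ConstructArrayLike(receiver, argc)
      for (let i = 0; i < args.length; i++) {
        out[i] = args[i];                 // CreateDataProperty(out, i, args[i])
      }
      return out;                         // length already equals args.length
    }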
// ES #sec-get-%typedarray%.prototype.find
-TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1856,19 +2225,19 @@ TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
// ES #sec-get-%typedarray%.prototype.findIndex
-TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1877,14 +2246,14 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1898,15 +2267,13 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1917,10 +2284,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1931,13 +2297,13 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1946,20 +2312,20 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1968,14 +2334,14 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2002,9 +2368,9 @@ TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
}
}
-TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2015,9 +2381,9 @@ TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
len, UndefinedConstant()));
}
-TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2031,18 +2397,17 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2050,21 +2415,20 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.some", &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2073,14 +2437,14 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2107,10 +2471,9 @@ TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
{ Return(FalseConstant()); }
}
-TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2121,9 +2484,9 @@ TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2137,18 +2500,17 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2156,21 +2518,20 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.every", &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2179,14 +2540,14 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2200,15 +2561,30 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ // Simulate starting the loop at 0, while ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if not found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
+
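The pre-loop continuation above restarts reduce at index 0 with the-hole as the accumulator, so the generic loop's first present element seeds the accumulator and an empty result throws. A hedged TypeScript sketch with a sentinel in place of the-hole:

    const HOLE = Symbol("the-hole"); // stands in for V8's the-hole sentinel

    function reduceLoopSketch<T>(o: T[], len: number,
                                 cb: (acc: T, v: T, k: number) => T,
                                 acc: T | typeof HOLE = HOLE): T {
      for (let k = 0; k < len; k++) {
        if (!(k in o)) continue;                      // MissingPropertyMode::kSkip
        acc = acc === HOLE ? o[k] : cb(acc, o[k], k); // first element seeds acc
      }
      if (acc === HOLE) {                             // ReducePostLoopAction
        throw new TypeError("Reduce of empty array with no initial value");
      }
      return acc;
    }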
+TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2220,10 +2596,9 @@ TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2235,13 +2610,13 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2249,21 +2624,20 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ "Array.prototype.reduce", &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2272,14 +2646,14 @@ TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction);
}
-TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2293,15 +2667,31 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- MissingPropertyMode::kSkip, ForEachDirection::kReverse);
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction, MissingPropertyMode::kSkip,
+ ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ // Simulate starting the loop at 0, but ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if not found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
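
The same hole-accumulator setup is reused for the reverse direction; reduceRight seeds its accumulator from the last present element and walks toward index 0. For example:

    const order: number[] = [];
    const joined = ["a", "b", "c"].reduceRight((acc, x, i) => {
      order.push(i);
      return acc + x;
    });
    console.log(joined); // "cba" (seeded from index 2)
    console.log(order);  // [1, 0]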
+
+TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2313,10 +2703,9 @@ TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2328,13 +2717,13 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2343,21 +2732,21 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayReduceRightLoopContinuation),
MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}
-TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2366,15 +2755,15 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2388,15 +2777,13 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2409,10 +2796,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
to));
}
-TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2451,13 +2837,13 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
to.value()));
}
-TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2465,17 +2851,16 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.filter",
- &ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.filter", &ArrayBuiltinsAssembler::FilterResultGenerator,
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2489,14 +2874,13 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SpecCompliantMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2508,9 +2892,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2533,13 +2917,13 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2547,20 +2931,20 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FastMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.map", &ArrayBuiltinsAssembler::MapResultGenerator,
+ &ArrayBuiltinsAssembler::FastMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2569,9 +2953,9 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.map",
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator,
+ &ArrayBuiltinsAssembler::TypedArrayMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
@@ -2620,7 +3004,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
TNode<Object> receiver = args.GetReceiver();
TNode<Object> search_element =
args.GetOptionalArgumentValue(kSearchElementArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* intptr_zero = IntPtrConstant(0);
@@ -2999,7 +3383,8 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void Generate_ArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ void Generate_ArrayPrototypeIterationMethod(TNode<Context> context,
+ TNode<Object> receiver,
IterationKind iteration_kind) {
VARIABLE(var_array, MachineRepresentation::kTagged);
VARIABLE(var_map, MachineRepresentation::kTagged);
@@ -3009,15 +3394,17 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
Label create_array_iterator(this);
GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
- var_array.Bind(receiver);
- var_map.Bind(LoadMap(receiver));
+
+ TNode<HeapObject> object_receiver = CAST(receiver);
+ var_array.Bind(object_receiver);
+ var_map.Bind(LoadMap(object_receiver));
var_type.Bind(LoadMapInstanceType(var_map.value()));
Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
&if_isnotobject);
BIND(&if_isnotobject);
{
- Node* result = CallBuiltin(Builtins::kToObject, context, receiver);
+ TNode<JSReceiver> result = ToObject(context, receiver);
var_array.Bind(result);
var_map.Bind(LoadMap(result));
var_type.Bind(LoadMapInstanceType(var_map.value()));
@@ -3031,31 +3418,30 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
};
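
Replacing the CallBuiltin(kToObject, ...) with the typed ToObject helper keeps the same generic behavior: a primitive receiver is wrapped before the iterator is created. A sketch of the JS-visible effect (TypeScript; the casts are only to satisfy the type checker):

    // Array.prototype.keys is generic; ToObject("ab") yields a String
    // wrapper with length 2, so the iterator produces its indices.
    const keys = Array.prototype.keys.call("ab" as any) as IterableIterator<number>;
    console.log([...keys]); // [0, 1]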
TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kValues);
}
TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kEntries);
}
TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kKeys);
}
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
- Handle<String> operation = factory()->NewStringFromAsciiChecked(
- "Array Iterator.prototype.next", TENURED);
+ const char* method_name = "Array Iterator.prototype.next";
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* iterator = Parameter(Descriptor::kReceiver);
VARIABLE(var_value, MachineRepresentation::kTagged);
@@ -3300,6 +3686,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
@@ -3309,19 +3697,23 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE,
};
Label uint8_values(this), int8_values(this), uint16_values(this),
int16_values(this), uint32_values(this), int32_values(this),
- float32_values(this), float64_values(this);
+ float32_values(this), float64_values(this), biguint64_values(this),
+ bigint64_values(this);
Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values,
+ &allocate_key_result, &uint8_values, &uint8_values,
+ &int8_values, &uint16_values, &int16_values,
+ &uint32_values, &int32_values, &float32_values,
+ &float64_values, &biguint64_values, &bigint64_values,
+ &uint8_values, &uint8_values, &int8_values,
+ &uint16_values, &int16_values, &uint32_values,
+ &int32_values, &float32_values, &float64_values,
+ &biguint64_values, &bigint64_values,
};
var_done.Bind(FalseConstant());
@@ -3330,59 +3722,62 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&uint8_values);
{
- Node* value_uint8 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int8_values);
{
- Node* value_int8 = LoadFixedTypedArrayElement(
- data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint16_values);
{
- Node* value_uint16 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int16_values);
{
- Node* value_int16 = LoadFixedTypedArrayElement(
- data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint32_values);
{
- Node* value_uint32 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeUint32ToTagged(value_uint32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int32_values);
{
- Node* value_int32 = LoadFixedTypedArrayElement(
- data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeInt32ToTagged(value_int32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float32_values);
{
- Node* value_float32 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(
- AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value_float32)));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float64_values);
{
- Node* value_float64 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value_float64));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&biguint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGUINT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&bigint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGINT64_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
}
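
With the new BIGINT64/BIGUINT64 cases, the iterator boxes those elements as BigInts rather than Numbers. Observable behavior, assuming a BigInt-capable runtime:

    const big = new BigInt64Array([1n, -2n]);
    for (const [i, v] of big.entries()) {
      console.log(i, typeof v, v); // 0 "bigint" 1n, then 1 "bigint" -2n
    }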
@@ -3447,14 +3842,12 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(operation), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), iterator);
}
BIND(&if_isdetached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation,
- HeapConstant(operation));
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
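
Switching from Runtime::kThrowIncompatibleMethodReceiver to ThrowTypeError keeps the exception type the same; only the throw path is now generated inline. From JavaScript:

    const it = [1][Symbol.iterator]();
    try {
      // next() demands a real array iterator as its receiver.
      it.next.call({} as any);
    } catch (e) {
      console.log(e instanceof TypeError); // true
    }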
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
new file mode 100644
index 0000000000..67ac51480c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -0,0 +1,156 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayBuiltinsAssembler(compiler::CodeAssemblerState* state);
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)>
+ BuiltinResultGenerator;
+
+ typedef std::function<Node*(ArrayBuiltinsAssembler* masm, Node* k_value,
+ Node* k)>
+ CallResultProcessor;
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)> PostLoopAction;
+
+ enum class MissingPropertyMode { kSkip, kUseUndefined };
+
+ void FindResultGenerator();
+
+ Node* FindProcessor(Node* k_value, Node* k);
+
+ void FindIndexResultGenerator();
+
+ Node* FindIndexProcessor(Node* k_value, Node* k);
+
+ void ForEachResultGenerator();
+
+ Node* ForEachProcessor(Node* k_value, Node* k);
+
+ void SomeResultGenerator();
+
+ Node* SomeProcessor(Node* k_value, Node* k);
+
+ void EveryResultGenerator();
+
+ Node* EveryProcessor(Node* k_value, Node* k);
+
+ void ReduceResultGenerator();
+
+ Node* ReduceProcessor(Node* k_value, Node* k);
+
+ void ReducePostLoopAction();
+
+ void FilterResultGenerator();
+
+ Node* FilterProcessor(Node* k_value, Node* k);
+
+ void MapResultGenerator();
+
+ void TypedArrayMapResultGenerator();
+
+ Node* SpecCompliantMapProcessor(Node* k_value, Node* k);
+
+ Node* FastMapProcessor(Node* k_value, Node* k);
+
+ // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
+ Node* TypedArrayMapProcessor(Node* k_value, Node* k);
+
+ void NullPostLoopAction();
+
+ protected:
+ TNode<Context> context() { return context_; }
+ TNode<Object> receiver() { return receiver_; }
+ Node* new_target() { return new_target_; }
+ TNode<IntPtrT> argc() { return argc_; }
+ Node* o() { return o_; }
+ Node* len() { return len_; }
+ Node* callbackfn() { return callbackfn_; }
+ Node* this_arg() { return this_arg_; }
+ Node* k() { return k_.value(); }
+ Node* a() { return a_.value(); }
+
+ void ReturnFromBuiltin(Node* value);
+
+ void InitIteratingArrayBuiltinBody(TNode<Context> context,
+ TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target,
+ TNode<IntPtrT> argc);
+
+ void GenerateIteratingArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ const Callable& slow_case_continuation,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+ void InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to);
+
+ void GenerateIteratingTypedArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ void GenerateIteratingArrayBuiltinLoopContinuation(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ private:
+ static ElementsKind ElementsKindForInstanceType(InstanceType type);
+
+ void VisitAllTypedArrayElements(Node* array_buffer,
+ const CallResultProcessor& processor,
+ Label* detached, ForEachDirection direction);
+
+ void VisitAllFastElementsOneKind(ElementsKind kind,
+ const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ void HandleFastElements(const CallResultProcessor& processor,
+ const PostLoopAction& action, Label* slow,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ // This version is specialized to create a zero length array
+ // of the elements kind of the input array.
+ void GenerateArraySpeciesCreate();
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ void GenerateArraySpeciesCreate(SloppyTNode<Smi> len);
+
+ Node* callbackfn_ = nullptr;
+ Node* o_ = nullptr;
+ Node* this_arg_ = nullptr;
+ Node* len_ = nullptr;
+ TNode<Context> context_;
+ TNode<Object> receiver_;
+ Node* new_target_ = nullptr;
+ TNode<IntPtrT> argc_;
+ Node* fast_typed_array_target_ = nullptr;
+ const char* name_ = nullptr;
+ Variable k_;
+ Variable a_;
+ Variable to_;
+ Label fully_spec_compliant_;
+ ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
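
The two GenerateArraySpeciesCreate overloads declared in this header implement ES #sec-arrayspeciescreate, which is what makes map and filter respect a subclass constructor. A TypeScript illustration of that contract (runtime behavior only; casts are for the type checker):

    class Tagged<T> extends Array<T> {}
    const t = Tagged.from([1, 2, 3]) as Tagged<number>;
    // ArraySpeciesCreate consults t.constructor[Symbol.species] (Tagged),
    // so the result of filter is itself a Tagged instance.
    console.log(t.filter(x => x > 1) instanceof Tagged); // true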
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 70ee2326f5..f400e824f0 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -10,6 +10,7 @@
#include "src/contexts.h"
#include "src/counters.h"
#include "src/elements.h"
+#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
@@ -251,7 +252,7 @@ BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*receiver);
if (V8_UNLIKELY(!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsArraySpeciesLookupChainIntact() ||
+ !isolate->IsSpeciesLookupChainIntact() ||
// If this is a subclass of Array, then call out to JS
!array->HasArrayPrototype(isolate))) {
AllowHeapAllocation allow_allocation;
@@ -316,7 +317,7 @@ BUILTIN(ArraySplice) {
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
- !isolate->IsArraySpeciesLookupChainIntact())) {
+ !isolate->IsSpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -1186,7 +1187,7 @@ BUILTIN(ArrayConcat) {
// Avoid a real species read to avoid extra lookups to the array constructor
if (V8_LIKELY(receiver->IsJSArray() &&
Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact())) {
+ isolate->IsSpeciesLookupChainIntact())) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
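
IsSpeciesLookupChainIntact guards these C++ fast paths because a user-defined @@species changes what slice, splice, and concat must allocate; once the protector is invalidated, execution falls back to the generic path. A TypeScript sketch of why the check matters:

    class Plain extends Array<number> {
      // Redirect species back to plain Array.
      static get [Symbol.species]() { return Array; }
    }
    const p = Plain.from([1, 2, 3]) as Plain;
    console.log(p.slice(1) instanceof Plain); // false; species chose Array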
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 0d0e34ee0d..0db53c687e 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -21,37 +21,18 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
- void AsyncFunctionAwaitResumeClosure(
- Node* const context, Node* const sent_value,
- JSGeneratorObject::ResumeMode resume_mode);
+ void AsyncFunctionAwaitResume(Node* const context, Node* const argument,
+ Node* const generator,
+ JSGeneratorObject::ResumeMode resume_mode);
};
-namespace {
-
-// Describe fields of Context associated with AsyncFunctionAwait resume
-// closures.
-// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
-class AwaitContext {
- public:
- enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
-};
-
-} // anonymous namespace
-
-void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
- Node* context, Node* sent_value,
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResume(
+ Node* const context, Node* const argument, Node* const generator,
JSGeneratorObject::ResumeMode resume_mode) {
+ CSA_ASSERT(this, IsJSGeneratorObject(generator));
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
-
- // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
- // unnecessary runtime checks removed.
- // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
-
// Ensure that the generator is neither closed nor running.
CSA_SLOW_ASSERT(
this,
@@ -66,31 +47,23 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator);
-
- // The resulting Promise is a throwaway, so it doesn't matter what it
- // resolves to. What is important is that we don't end up keeping the
- // whole chain of intermediate Promises alive by returning the return value
- // of ResumeGenerator, as that would create a memory leak.
+ TailCallStub(callable, context, argument, generator);
}
-TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentError = Parameter(Descriptor::kSentError);
+TF_BUILTIN(AsyncFunctionAwaitFulfill, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentError,
- JSGeneratorObject::kThrow);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentValue = Parameter(Descriptor::kSentValue);
+TF_BUILTIN(AsyncFunctionAwaitReject, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kThrow);
}
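
The reject variant resumes the suspended function in kThrow mode, which surfaces as the rejection value being re-thrown at the await site:

    async function f(): Promise<string> {
      try {
        await Promise.reject(new Error("boom"));
        return "unreachable";
      } catch (e) {
        return (e as Error).message;
      }
    }
    f().then(m => console.log(m)); // "boom"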
// ES#abstract-ops-async-function-await
@@ -105,25 +78,12 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Node* const context, Node* const generator, Node* const awaited,
Node* const outer_promise, const bool is_predicted_as_caught) {
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
- // the awaited promise if it is already a promise. Reuse is non-spec compliant
- // but part of our old behavior gives us a couple of percent
- // performance boost.
- // TODO(jgruber): Use a faster specialized version of
- // InternalPerformPromiseThen.
-
- Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
- init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
- is_predicted_as_caught);
+ CSA_SLOW_ASSERT(this, IsJSGeneratorObject(generator));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+
+ Await(context, generator, awaited, outer_promise,
+ Builtins::kAsyncFunctionAwaitFulfill,
+ Builtins::kAsyncFunctionAwaitReject, is_predicted_as_caught);
  // Return outer promise to avoid adding a load of the outer promise before
// suspending in BytecodeGenerator.
@@ -133,30 +93,28 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = true;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = false;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 0cdcb57a3f..7958afba00 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -13,6 +13,58 @@ namespace internal {
using compiler::Node;
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught) {
+ CSA_SLOW_ASSERT(this, Word32Or(IsJSAsyncGeneratorObject(generator),
+ IsJSGeneratorObject(generator)));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+ CSA_SLOW_ASSERT(this, IsBoolean(is_predicted_as_caught));
+
+ Node* const native_context = LoadNativeContext(context);
+
+ // TODO(bmeurer): This could be optimized and folded into a single allocation.
+ Node* const promise = AllocateAndInitJSPromise(native_context);
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const fulfill_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), fulfill_builtin).code());
+ Node* const reject_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), reject_builtin).code());
+ Node* const reaction = AllocatePromiseReaction(
+ promise_reactions, generator, fulfill_handler, reject_handler);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ PromiseSetHasHandler(promise);
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « value »).
+ CallBuiltin(Builtins::kResolvePromise, native_context, promise, value);
+
+ // When debugging, we need to link from the {generator} to the
+ // {outer_promise} of the async function/generator.
+ Label done(this);
+ GotoIfNot(IsDebugActive(), &done);
+ CallRuntime(Runtime::kSetProperty, native_context, generator,
+ LoadRoot(Heap::kgenerator_outer_promise_symbolRootIndex),
+ outer_promise, SmiConstant(LanguageMode::kStrict));
+ GotoIf(IsFalse(is_predicted_as_caught), &done);
+ GotoIf(TaggedIsSmi(value), &done);
+ GotoIfNot(IsJSPromise(value), &done);
+ PromiseSetHandledHint(value);
+ Goto(&done);
+ BIND(&done);
+}
+
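
In spec terms the new Await body is roughly "resolve a fresh promise with the operand, with the fulfill/reject builtins installed as its sole reaction". A rough JavaScript-level analogue of that shape, written in TypeScript (a sketch of the semantics, not of the allocation strategy):

    function awaitValue<T>(v: T | PromiseLike<T>,
                           onFulfill: (x: T) => void,
                           onReject: (e: unknown) => void): void {
      // Promise.resolve plays the role of kResolvePromise; then() installs
      // the equivalent of the fulfill/reject handler pair.
      Promise.resolve(v).then(onFulfill, onReject);
    }
    awaitValue(42, x => console.log("resumed with", x),
               e => console.log("threw", e)); // "resumed with 42"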
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ bool is_predicted_as_caught) {
+ return Await(context, generator, value, outer_promise, fulfill_builtin,
+ reject_builtin, BooleanConstant(is_predicted_as_caught));
+}
+
namespace {
// Describe fields of Context associated with the AsyncIterator unwrap closure.
class ValueUnwrapContext {
@@ -22,165 +74,6 @@ class ValueUnwrapContext {
} // namespace
-Node* AsyncBuiltinsAssembler::Await(
- Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length, const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
- DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
-
- Node* const native_context = LoadNativeContext(context);
-
- static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
- static const int kThrowawayPromiseOffset =
- kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kResolveClosureOffset =
- kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kRejectClosureOffset =
- kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
- static const int kTotalSize =
- kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
-
- Node* const base = AllocateInNewSpace(kTotalSize);
- Node* const closure_context = base;
- {
- // Initialize closure context
- InitializeFunctionContext(native_context, closure_context, context_length);
- init_closure_context(closure_context);
- }
-
- // Let promiseCapability be ! NewPromiseCapability(%Promise%).
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const promise_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- // Assert that the JSPromise map has an instance size is
- // JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
- {
- // Initialize Promise
- StoreMapNoWriteBarrier(wrapped_value, promise_map);
- InitializeJSObjectFromMap(
- wrapped_value, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(wrapped_value);
- }
-
- Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
- {
- // Initialize throwawayPromise
- StoreMapNoWriteBarrier(throwaway, promise_map);
- InitializeJSObjectFromMap(
- throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(throwaway);
- }
-
- Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
- {
- // Initialize resolve handler
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
- }
-
- Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
- {
- // Initialize reject handler
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
- }
-
- {
- // Add PromiseHooks if needed
- Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
- CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
- outer_promise);
- CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
- Goto(&next);
- BIND(&next);
- }
-
- // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
-
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway);
-
- Label do_perform_promise_then(this);
- GotoIfNot(IsDebugActive(), &do_perform_promise_then);
- {
- Label common(this);
- GotoIf(TaggedIsSmi(value), &common);
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
- {
- // Mark the reject handler callback to be a forwarding edge, rather
- // than a meaningful catch handler
- Node* const key =
- HeapConstant(factory()->promise_forwarding_handler_symbol());
- CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
-
- GotoIf(IsFalse(is_predicted_as_caught), &common);
- PromiseSetHandledHint(value);
- }
-
- Goto(&common);
- BIND(&common);
- // Mark the dependency to outer Promise in case the throwaway Promise is
- // found on the Promise stack
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(LanguageMode::kStrict));
- }
-
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
-
- CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway);
-
- return wrapped_value;
-}
-
-void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
- Node* native_context,
- Node* function,
- Node* context_index) {
- Node* const function_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- // Ensure that we don't have to initialize prototype_or_initial_map field of
- // JSFunction.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
- IntPtrConstant(JSFunction::kSizeWithoutPrototype /
- kPointerSize)));
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreMapNoWriteBarrier(function, function_map);
- StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
- Heap::kUndefinedCellRootIndex);
-
- Node* shared_info = LoadContextElement(native_context, context_index);
- CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
- StoreObjectFieldNoWriteBarrier(
- function, JSFunction::kSharedFunctionInfoOffset, shared_info);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
-
- Node* const code =
- LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
-}
-
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 212b0b618b..70f68a498b 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
-#define V8_BUILTINS_BUILTINS_ASYNC_H_
+#ifndef V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
+#define V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
#include "src/builtins/builtins-promise-gen.h"
@@ -16,51 +16,26 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<void(Node*)> ContextInitializer;
-
- // Perform steps to resume generator after `value` is resolved.
- // `on_reject_context_index` is an index into the Native Context, which should
-  // point to a SharedFunctionInfo instance used to create the closure. The
- // value following the reject index should be a similar value for the resolve
- // closure. Returns the Promise-wrapped `value`.
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught);
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- Node* is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, IntPtrConstant(on_resolve_context_index),
- IntPtrConstant(on_reject_context_index),
- is_predicted_as_caught);
- }
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- bool is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, on_resolve_context_index,
- on_reject_context_index,
- BooleanConstant(is_predicted_as_caught));
- }
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught);
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ bool is_predicted_as_caught);
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
private:
- void InitializeNativeClosure(Node* context, Node* native_context,
- Node* function, Node* context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
+ Node* AllocateAwaitPromiseJobTask(Node* generator, Node* fulfill_handler,
+ Node* reject_handler, Node* promise,
+ Node* context);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_ASYNC_H_
+#endif // V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 70726a5f9d..b78747aaa9 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -68,24 +68,24 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
return IsGeneratorStateNotExecuting(LoadGeneratorState(generator));
}
- inline Node* LoadGeneratorAwaitedPromise(Node* const generator) {
- return LoadObjectField(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset);
+ inline Node* IsGeneratorAwaiting(Node* const generator) {
+ Node* is_generator_awaiting =
+ LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset);
+ return SmiEqual(is_generator_awaiting, SmiConstant(1));
}
- inline Node* IsGeneratorNotSuspendedForAwait(Node* const generator) {
- return IsUndefined(LoadGeneratorAwaitedPromise(generator));
- }
-
- inline Node* IsGeneratorSuspendedForAwait(Node* const generator) {
- return HasInstanceType(LoadGeneratorAwaitedPromise(generator),
- JS_PROMISE_TYPE);
+ inline void SetGeneratorAwaiting(Node* const generator) {
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
}
- inline void ClearAwaitedPromise(Node* const generator) {
- StoreObjectFieldRoot(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- Heap::kUndefinedValueRootIndex);
+ inline void SetGeneratorNotAwaiting(Node* const generator) {
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
inline void CloseGenerator(Node* const generator) {
@@ -140,8 +140,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
// for AsyncGenerators.
template <typename Descriptor>
void AsyncGeneratorAwait(bool is_catchable);
- void AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+ void AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode);
};
@@ -193,7 +193,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
StringConstant(method_name), generator);
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, error,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, error,
TrueConstant());
args->PopAndReturn(promise);
}
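
Because enqueue rejects the freshly created promise instead of throwing, an invalid receiver is reported asynchronously:

    async function* gen() { yield 1; }
    const next = gen().next;
    // No synchronous throw; the returned promise rejects with a TypeError.
    (next.call({} as any) as Promise<unknown>)
        .catch(e => console.log(e instanceof TypeError)); // true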
@@ -219,21 +219,12 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
return request;
}
-void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
-#if defined(DEBUG) && defined(ENABLE_SLOW_DCHECKS)
- Node* const awaited_promise = LoadGeneratorAwaitedPromise(generator);
- CSA_SLOW_ASSERT(this, HasInstanceType(awaited_promise, JS_PROMISE_TYPE));
- CSA_SLOW_ASSERT(this, Word32NotEqual(PromiseStatus(awaited_promise),
- Int32Constant(v8::Promise::kPending)));
-#endif
-
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
@@ -242,40 +233,30 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
JSGeneratorObject::kResumeModeOffset,
SmiConstant(resume_mode));
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator);
+ CallStub(CodeFactory::ResumeGenerator(isolate()), context, argument,
+ generator);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(Descriptor::kGenerator);
- Node* value = Parameter(Descriptor::kAwaited);
- Node* context = Parameter(Descriptor::kContext);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, IsNotUndefined(request));
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
Node* outer_promise =
LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
- const int resolve_index = Context::ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN;
- const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, resolve_index, reject_index, is_catchable);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorAwaitFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_catchable);
Return(UndefinedConstant());
}
@@ -386,18 +367,20 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
"[AsyncGenerator].prototype.throw");
}
-TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kNext);
+TF_BUILTIN(AsyncGeneratorAwaitFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kThrow);
+TF_BUILTIN(AsyncGeneratorAwaitReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kThrow);
}
TF_BUILTIN(AsyncGeneratorAwaitUncaught, AsyncGeneratorBuiltinsAssembler) {
@@ -435,7 +418,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
CSA_ASSERT(this, IsGeneratorNotExecuting(generator));
// Stop resuming if suspended for Await.
- ReturnIf(IsGeneratorSuspendedForAwait(generator), UndefinedConstant());
+ ReturnIf(IsGeneratorAwaiting(generator), UndefinedConstant());
// Stop resuming if request queue is empty.
ReturnIf(IsUndefined(var_next.value()), UndefinedConstant());
@@ -452,10 +435,9 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
&settle_promise);
CloseGenerator(generator);
var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed));
-
Goto(&settle_promise);
- BIND(&settle_promise);
+ BIND(&settle_promise);
Node* next_value = LoadValueFromAsyncGeneratorRequest(next);
Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)),
&if_return, &if_throw);
@@ -511,7 +493,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
- CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// If this assertion fails, the `value` component was not Awaited as it should
// have been, per https://github.com/tc39/proposal-async-iteration/pull/102/.
@@ -537,7 +519,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
}
// Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolveNativePromise, context, promise, iter_result);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
@@ -553,7 +535,7 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
- Return(CallBuiltin(Builtins::kRejectNativePromise, context, promise, value,
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value,
TrueConstant()));
}
@@ -566,34 +548,23 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
- const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, on_resolve, on_reject, is_caught);
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorYieldFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
Return(UndefinedConstant());
}
-TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorYieldFulfill, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// Per proposal-async-iteration/#sec-asyncgeneratoryield step 9
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *false*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
FalseConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
@@ -619,42 +590,33 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
Node* const generator = Parameter(Descriptor::kGenerator);
Node* const value = Parameter(Descriptor::kValue);
Node* const is_caught = Parameter(Descriptor::kIsCaught);
+ Node* const context = Parameter(Descriptor::kContext);
Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator);
+ Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
CSA_ASSERT(this, IsNotUndefined(req));
- Label perform_await(this);
- VARIABLE(var_on_resolve, MachineType::PointerRepresentation(),
- IntPtrConstant(
- Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
- VARIABLE(
- var_on_reject, MachineType::PointerRepresentation(),
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN));
-
+ Label if_closed(this, Label::kDeferred), if_not_closed(this), done(this);
Node* const state = LoadGeneratorState(generator);
- GotoIf(IsGeneratorStateClosed(state), &perform_await);
- var_on_resolve.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN));
- var_on_reject.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN));
- Goto(&perform_await);
+ SetGeneratorAwaiting(generator);
+ Branch(IsGeneratorStateClosed(state), &if_closed, &if_not_closed);
- BIND(&perform_await);
+ BIND(&if_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnClosedFulfill,
+ Builtins::kAsyncGeneratorReturnClosedReject, is_caught);
+ Goto(&done);
+ }
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
+ BIND(&if_not_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
+ Goto(&done);
+ }
- Node* const context = Parameter(Descriptor::kContext);
- Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, var_on_resolve.value(), var_on_reject.value(),
- is_caught);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ BIND(&done);
Return(UndefinedConstant());
}
@@ -662,49 +624,44 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// Resume the generator with "return" resume_mode, and finally perform
// AsyncGeneratorResumeNext. Per
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
-TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSGeneratorObject::kReturn);
}
// On-resolve closure for Await in AsyncGeneratorReturn
// Perform AsyncGeneratorResolve({awaited_value}, true) and finally perform
// AsyncGeneratorResumeNext.
-TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-fulfilled step 2:
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *true*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
TrueConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
-TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-rejected step 2:
// Return ! AsyncGeneratorReject(_F_.[[Generator]], _reason_).
- CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, value);
+ CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, argument);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
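
Note: the AsyncGeneratorAwaitFulfill/AsyncGeneratorAwaitReject builtins above resume a suspended async generator with kNext or kThrow once the awaited promise settles. A minimal TypeScript sketch of the observable language semantics they implement (behavior only, not V8 internals):

async function* gen() {
  try {
    const v = await Promise.resolve(1); // fulfill: generator resumes with kNext
    yield v;
    await Promise.reject(new Error("boom")); // reject: generator resumes with kThrow
  } catch (e) {
    yield (e as Error).message;
  }
}

(async () => {
  for await (const x of gen()) console.log(x); // logs 1, then "boom"
})();
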
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index f232b32700..58691bd00e 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -28,13 +28,29 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
typedef std::function<void(Node* const context, Node* const promise,
Label* if_exception)>
UndefinedMethodHandler;
+ typedef std::function<Node*(Node*)> SyncIteratorNodeGenerator;
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
Node* const initial_exception_value = nullptr);
+ void Generate_AsyncFromSyncIteratorMethod(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr) {
+ auto get_method = [=](Node* const sync_iterator) {
+ return GetProperty(context, sync_iterator, name);
+ };
+ return Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, sent_value, get_method, if_method_undefined,
+ operation_name, reject_label_type, initial_exception_value);
+ }
+
// Load "value" and "done" from an iterator result object. If an exception
  // is thrown at any point, jumps to the `if_exception` label with the exception
// stored in `var_exception`.
@@ -79,7 +95,8 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
Node* const initial_exception_value) {
Node* const native_context = LoadNativeContext(context);
@@ -96,7 +113,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const sync_iterator =
LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
- Node* const method = GetProperty(context, sync_iterator, method_name);
+ Node* const method = get_method(sync_iterator);
if (if_method_undefined) {
Label if_isnotundefined(this);
@@ -119,7 +136,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! Call(valueWrapperCapability.[[Resolve]], undefined, «
// throwValue »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapper, value);
+ CallBuiltin(Builtins::kResolvePromise, context, wrapper, value);
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
@@ -128,13 +145,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
// onFulfilled, undefined, promiseCapability).
- Return(CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapper,
+ Return(CallBuiltin(Builtins::kPerformPromiseThen, context, wrapper,
on_fulfilled, UndefinedConstant(), promise));
BIND(&reject_promise);
{
Node* const exception = var_exception.value();
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, exception,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
TrueConstant());
Return(promise);
}
@@ -211,6 +228,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&done);
return std::make_pair(var_value.value(), var_done.value());
}
+
} // namespace
// https://tc39.github.io/proposal-async-iteration/
@@ -220,9 +238,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ auto get_method = [=](Node* const unused) {
+ return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
+ };
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, value, factory()->next_string(),
- UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
+ context, iterator, value, get_method, UndefinedMethodHandler(),
+ "[Async-from-Sync Iterator].prototype.next");
}
// https://tc39.github.io/proposal-async-iteration/
@@ -243,7 +264,7 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
// Return promiseCapability.[[Promise]].
- PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
Return(promise);
};
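
Note: at the language level, the Async-from-Sync Iterator machinery above lets `for await` consume a plain sync iterable by resolving each step through a promise capability. A minimal TypeScript sketch of that observable behavior:

const syncIterable: Iterable<number | Promise<number>> = [1, Promise.resolve(2), 3];

(async () => {
  for await (const v of syncIterable) {
    console.log(v); // 1, 2, 3 (promise-valued entries from the sync iterator are awaited)
  }
})();
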
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6d9bb6e797..fdbd3937d4 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -36,33 +36,6 @@ BUILTIN(BigIntConstructor_ConstructStub) {
isolate->factory()->BigInt_string()));
}
-BUILTIN(BigIntParseInt) {
- HandleScope scope(isolate);
- Handle<Object> string = args.atOrUndefined(isolate, 1);
- Handle<Object> radix = args.atOrUndefined(isolate, 2);
-
- // Convert {string} to a String and flatten it.
- // Fast path: avoid back-and-forth conversion for Smi inputs.
- if (string->IsSmi() && radix->IsUndefined(isolate)) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, string));
- }
- Handle<String> subject;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
- Object::ToString(isolate, string));
- subject = String::Flatten(subject);
-
- // Convert {radix} to Int32.
- if (!radix->IsNumber()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
- }
- int radix32 = DoubleToInt32(radix->Number());
- if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewSyntaxError(MessageTemplate::kToRadixFormatRange));
- }
- RETURN_RESULT_OR_FAILURE(isolate, BigIntParseInt(isolate, subject, radix32));
-}
-
BUILTIN(BigIntAsUintN) {
HandleScope scope(isolate);
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
@@ -97,14 +70,6 @@ BUILTIN(BigIntAsIntN) {
return *BigInt::AsIntN(bits->Number(), bigint);
}
-BUILTIN(BigIntPrototypeToLocaleString) {
- HandleScope scope(isolate);
-
- // TODO(jkummerow): Implement.
-
- UNIMPLEMENTED();
-}
-
namespace {
MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
@@ -127,18 +92,14 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
BigInt);
}
-} // namespace
-
-BUILTIN(BigIntPrototypeToString) {
- HandleScope scope(isolate);
+Object* BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
+ Isolate* isolate, const char* builtin_name) {
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x,
- ThisBigIntValue(isolate, args.receiver(), "BigInt.prototype.toString"));
+ isolate, x, ThisBigIntValue(isolate, receiver, builtin_name));
// 2. If radix is not present, let radixNumber be 10.
// 3. Else if radix is undefined, let radixNumber be 10.
- Handle<Object> radix = args.atOrUndefined(isolate, 1);
int radix_number;
if (radix->IsUndefined(isolate)) {
radix_number = 10;
@@ -158,6 +119,22 @@ BUILTIN(BigIntPrototypeToString) {
RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(x, radix_number));
}
+} // namespace
+
+BUILTIN(BigIntPrototypeToLocaleString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = isolate->factory()->undefined_value();
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toLocaleString");
+}
+
+BUILTIN(BigIntPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = args.atOrUndefined(isolate, 1);
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toString");
+}
+
BUILTIN(BigIntPrototypeValueOf) {
HandleScope scope(isolate);
RETURN_RESULT_OR_FAILURE(
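
Note: BigIntToStringImpl above serves both BigInt.prototype.toString and the current toLocaleString placeholder, sharing the radix validation. A short TypeScript sketch of the resulting observable behavior:

const x = 255n;
console.log(x.toString());       // "255" (radix defaults to 10)
console.log(x.toString(16));     // "ff"
console.log(x.toLocaleString()); // routed through the same helper in this revision
try {
  x.toString(37); // radix outside [2, 36]
} catch (e) {
  console.log(e instanceof RangeError); // true
}
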
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index d4a7153d74..7443202c98 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -168,7 +168,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Node* elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
var_elements.Bind(elements);
- var_length.Bind(SmiToWord32(length));
+ var_length.Bind(SmiToInt32(length));
Goto(&if_done);
}
@@ -289,12 +289,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_runtime);
// Check that the map of the initial array iterator hasn't changed.
- Node* native_context = LoadNativeContext(context);
- Node* arr_it_proto_map = LoadMap(CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
- Node* initial_map = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
- GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
+ TNode<Context> native_context = LoadNativeContext(context);
+ GotoIfNot(HasInitialArrayIteratorPrototypeMap(native_context), &if_runtime);
Node* kind = LoadMapElementsKind(spread_map);
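
Note: the spread-call guard above only skips the iterator protocol while the initial %ArrayIteratorPrototype% map is unchanged. A hedged TypeScript sketch (user-level code, not V8 internals) of why that check exists:

function sum(...xs: number[]): number {
  return xs.reduce((a, b) => a + b, 0);
}
console.log(sum(...[1, 2, 3])); // 9 (a pristine array iterator allows the fast path)

// Patching the iterator makes iteration observable, so the builtin must take
// the generic runtime path instead (commented out to stay side-effect free):
// (Array.prototype[Symbol.iterator] as any) = function* () { yield 42; };
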
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 392040c995..563703707c 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -26,31 +26,32 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
virtual ~BaseCollectionsAssembler() {}
protected:
- enum Variant { kMap, kSet };
+ enum Variant { kMap, kSet, kWeakMap, kWeakSet };
// Adds an entry to a collection. For Maps, properly handles extracting the
// key and value from the entry (see LoadKeyValue()).
- TNode<Object> AddConstructorEntry(Variant variant, TNode<Context> context,
- TNode<Object> collection,
- TNode<Object> add_function,
- TNode<Object> key_value,
- Label* if_exception = nullptr,
- TVariable<Object>* var_exception = nullptr);
+ void AddConstructorEntry(Variant variant, TNode<Context> context,
+ TNode<Object> collection, TNode<Object> add_function,
+ TNode<Object> key_value,
+ Label* if_may_have_side_effects = nullptr,
+ Label* if_exception = nullptr,
+ TVariable<Object>* var_exception = nullptr);
  // Adds constructor entries to a collection, choosing a fast path when
  // possible.
void AddConstructorEntries(Variant variant, TNode<Context> context,
TNode<Context> native_context,
TNode<Object> collection,
- TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray);
+ TNode<Object> initial_entries);
// Fast path for adding constructor entries. Assumes the entries are a fast
// JS array (see CodeStubAssembler::BranchIfFastJSArray()).
void AddConstructorEntriesFromFastJSArray(Variant variant,
TNode<Context> context,
+ TNode<Context> native_context,
TNode<Object> collection,
- TNode<JSArray> fast_jsarray);
+ TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects);
// Adds constructor entries to a collection using the iterator protocol.
void AddConstructorEntriesFromIterable(Variant variant,
@@ -61,8 +62,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Constructs a collection instance. Choosing a fast path when possible.
TNode<Object> AllocateJSCollection(TNode<Context> context,
- TNode<Context> native_context,
- int constructor_function_index,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Fast path for constructing a collection instance if the constructor
@@ -72,7 +72,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Fallback for constructing a collection instance if the constructor function
// has been modified.
TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
- TNode<HeapObject> constructor,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Allocates the backing store for a collection.
@@ -81,15 +81,26 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Main entry point for a collection constructor builtin.
void GenerateConstructor(Variant variant,
- const int constructor_function_index,
- Handle<String> constructor_function_name,
- int collection_tableoffset);
+ Handle<String> constructor_function_name);
// Retrieves the collection function that adds an entry. `set` for Maps and
// `add` for Sets.
TNode<Object> GetAddFunction(Variant variant, TNode<Context> context,
TNode<Object> collection);
+ // Retrieves the collection constructor function.
+ TNode<JSFunction> GetConstructor(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the initial collection function that adds an entry. Should only
+ // be called when it is certain that a collection prototype's map hasn't been
+ // changed.
+ TNode<JSFunction> GetInitialAddFunction(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the offset to access the backing table from the collection.
+ int GetTableOffset(Variant variant);
+
// Estimates the number of entries the collection will have after adding the
// entries passed in the constructor. AllocateTable() can use this to avoid
// the time of growing/rehashing when adding the constructor entries.
@@ -98,6 +109,11 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver);
+ // Determines whether the collection's prototype has been modified.
+ TNode<BoolT> HasInitialCollectionPrototype(Variant variant,
+ TNode<Context> native_context,
+ TNode<Object> collection);
+
// Loads an element from a fixed array. If the element is the hole, returns
// `undefined`.
TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<Object> elements,
@@ -112,59 +128,85 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// array. If the array lacks 2 elements, undefined is used.
void LoadKeyValue(TNode<Context> context, TNode<Object> maybe_array,
TVariable<Object>* key, TVariable<Object>* value,
+ Label* if_may_have_side_effects = nullptr,
Label* if_exception = nullptr,
TVariable<Object>* var_exception = nullptr);
};
-TNode<Object> BaseCollectionsAssembler::AddConstructorEntry(
+void BaseCollectionsAssembler::AddConstructorEntry(
Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<Object> add_function, TNode<Object> key_value, Label* if_exception,
+ TNode<Object> add_function, TNode<Object> key_value,
+ Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
- if (variant == kMap) {
- Label exit(this), if_notobject(this, Label::kDeferred);
- GotoIfNotJSReceiver(key_value, &if_notobject);
-
+ if (variant == kMap || variant == kWeakMap) {
TVARIABLE(Object, key);
TVARIABLE(Object, value);
- LoadKeyValue(context, key_value, &key, &value, if_exception, var_exception);
- Node* key_n = key;
- Node* value_n = value;
- TNode<Object> add_call =
- UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_n, value_n));
- Goto(&exit);
-
- BIND(&if_notobject);
- {
- Node* ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIteratorValueNotAnObject), key_value);
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(ret, if_exception, var_exception);
- }
- Unreachable();
- }
- BIND(&exit);
- return add_call;
-
- } else { // variant == kSet
- DCHECK(variant == kSet);
- return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_value));
+ LoadKeyValue(context, key_value, &key, &value, if_may_have_side_effects,
+ if_exception, var_exception);
+ Node* key_n = key.value();
+ Node* value_n = value.value();
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_n, value_n);
+ GotoIfException(ret, if_exception, var_exception);
+ } else {
+ DCHECK(variant == kSet || variant == kWeakSet);
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_value);
+ GotoIfException(ret, if_exception, var_exception);
}
}
void BaseCollectionsAssembler::AddConstructorEntries(
Variant variant, TNode<Context> context, TNode<Context> native_context,
- TNode<Object> collection, TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray) {
- Label exit(this), slow_loop(this, Label::kDeferred);
- GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ TNode<Object> collection, TNode<Object> initial_entries) {
+ TVARIABLE(BoolT, use_fast_loop,
+ IsFastJSArrayWithNoCustomIteration(initial_entries, context,
+ native_context));
+ TNode<IntPtrT> at_least_space_for =
+ EstimatedInitialSize(initial_entries, use_fast_loop.value());
+ Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this),
+ slow_loop(this, Label::kDeferred);
+ Goto(&allocate_table);
+ BIND(&allocate_table);
+ {
+ TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ StoreObjectField(collection, GetTableOffset(variant), table);
+ GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ GotoIfNot(
+ HasInitialCollectionPrototype(variant, native_context, collection),
+ &slow_loop);
+ Branch(use_fast_loop.value(), &fast_loop, &slow_loop);
+ }
+ BIND(&fast_loop);
+ {
+ TNode<JSArray> initial_entries_jsarray =
+ UncheckedCast<JSArray>(initial_entries);
+#if DEBUG
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ initial_entries_jsarray, context, native_context));
+ TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
+#endif
+
+ Label if_may_have_side_effects(this, Label::kDeferred);
+ AddConstructorEntriesFromFastJSArray(variant, context, native_context,
+ collection, initial_entries_jsarray,
+ &if_may_have_side_effects);
+ Goto(&exit);
- // TODO(mvstanton): Re-enable the fast path when a fix is found for
- // crbug.com/798026.
+ if (variant == kMap || variant == kWeakMap) {
+ BIND(&if_may_have_side_effects);
+#if DEBUG
+ CSA_ASSERT(this, HasInitialCollectionPrototype(variant, native_context,
+ collection));
+ CSA_ASSERT(this, WordEqual(original_initial_entries_map,
+ LoadMap(initial_entries_jsarray)));
+#endif
+ use_fast_loop = Int32FalseConstant();
+ Goto(&allocate_table);
+ }
+ }
+ BIND(&slow_loop);
{
AddConstructorEntriesFromIterable(variant, context, native_context,
collection, initial_entries);
@@ -174,17 +216,26 @@ void BaseCollectionsAssembler::AddConstructorEntries(
}
void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
- Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<JSArray> fast_jsarray) {
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects) {
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(fast_jsarray));
+ TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
+ CSA_ASSERT(
+ this,
+ WordEqual(GetAddFunction(variant, native_context, collection), add_func));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context,
+ native_context));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
- TNode<Object> add_func = GetAddFunction(variant, context, collection);
-
- CSA_ASSERT(this, IsFastJSArray(fast_jsarray, context));
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+ CSA_ASSERT(
+ this, HasInitialCollectionPrototype(variant, native_context, collection));
+#if DEBUG
+ TNode<Map> original_collection_map = LoadMap(CAST(collection));
+ TNode<Map> original_fast_js_array_map = LoadMap(fast_jsarray);
+#endif
Label exit(this), if_doubles(this), if_smiorobjects(this);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
&if_doubles);
@@ -193,8 +244,14 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
auto set_entry = [&](Node* index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(variant, context, collection, add_func, element);
+ AddConstructorEntry(variant, context, collection, add_func, element,
+ if_may_have_side_effects);
};
+
+ // Instead of using the slower iteration protocol to iterate over the
+ // elements, a fast loop is used. This assumes that adding an element
+ // to the collection does not call user code that could mutate the elements
+ // or collection.
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Goto(&exit);
@@ -203,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
{
// A Map constructor requires entries to be arrays (ex. [key, value]),
// so a FixedDoubleArray can never succeed.
- if (variant == kMap) {
+ if (variant == kMap || variant == kWeakMap) {
TNode<Float64T> element =
UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
elements, IntPtrConstant(0), MachineType::Float64(), 0,
@@ -211,10 +268,11 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
AllocateHeapNumberWithValue(element));
} else {
+ DCHECK(variant == kSet || variant == kWeakSet);
auto set_entry = [&](Node* index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(kSet, context, collection, add_func, entry);
+ AddConstructorEntry(variant, context, collection, add_func, entry);
};
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
@@ -222,6 +280,12 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
}
}
BIND(&exit);
+#if DEBUG
+ CSA_ASSERT(this,
+ WordEqual(original_collection_map, LoadMap(CAST(collection))));
+ CSA_ASSERT(this,
+ WordEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
+#endif
}
void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
@@ -247,10 +311,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
context, iterator, &exit, fast_iterator_result_map));
TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
context, next, fast_iterator_result_map));
- TNode<Object> add_result =
- AddConstructorEntry(variant, context, collection, add_func, next_value,
- &if_exception, &var_exception);
- GotoIfException(add_result, &if_exception, &var_exception);
+ AddConstructorEntry(variant, context, collection, add_func, next_value,
+ nullptr, &if_exception, &var_exception);
Goto(&loop);
}
BIND(&if_exception);
@@ -262,10 +324,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
- TNode<Context> context, TNode<Context> native_context,
- int constructor_function_index, TNode<Object> new_target) {
- TNode<HeapObject> constructor =
- CAST(LoadContextElement(native_context, constructor_function_index));
+ TNode<Context> context, TNode<JSFunction> constructor,
+ TNode<Object> new_target) {
TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
return Select<Object>(is_target_unmodified,
@@ -286,7 +346,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
- TNode<Context> context, TNode<HeapObject> constructor,
+ TNode<Context> context, TNode<JSFunction> constructor,
TNode<Object> new_target) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
@@ -294,8 +354,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
}
void BaseCollectionsAssembler::GenerateConstructor(
- Variant variant, const int constructor_function_index,
- Handle<String> constructor_function_name, int collection_tableoffset) {
+ Variant variant, Handle<String> constructor_function_name) {
const int kIterableArg = 0;
CodeStubArguments args(
this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
@@ -306,17 +365,11 @@ void BaseCollectionsAssembler::GenerateConstructor(
Label if_undefined(this, Label::kDeferred);
GotoIf(IsUndefined(new_target), &if_undefined);
- TNode<BoolT> is_fast_jsarray = IsFastJSArray(iterable, context);
- TNode<IntPtrT> at_least_space_for =
- EstimatedInitialSize(iterable, is_fast_jsarray);
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> collection = AllocateJSCollection(
- context, native_context, constructor_function_index, new_target);
- TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ context, GetConstructor(variant, native_context), new_target);
- StoreObjectField(collection, collection_tableoffset, table);
- AddConstructorEntries(variant, context, native_context, collection, iterable,
- is_fast_jsarray);
+ AddConstructorEntries(variant, context, native_context, collection, iterable);
Return(collection);
BIND(&if_undefined);
@@ -326,14 +379,10 @@ void BaseCollectionsAssembler::GenerateConstructor(
TNode<Object> BaseCollectionsAssembler::GetAddFunction(
Variant variant, TNode<Context> context, TNode<Object> collection) {
- // TODO(pwong): Consider calling the builtin directly when the prototype is
- // unmodified. This will require tracking WeakMap/WeakSet prototypes on the
- // native context.
- Handle<String> add_func_name = variant == kMap
+ Handle<String> add_func_name = (variant == kMap || variant == kWeakMap)
? isolate()->factory()->set_string()
: isolate()->factory()->add_string();
- TNode<Object> add_func =
- CAST(GetProperty(context, collection, add_func_name));
+ TNode<Object> add_func = GetProperty(context, collection, add_func_name);
Label exit(this), if_notcallable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(add_func), &if_notcallable);
@@ -348,6 +397,60 @@ TNode<Object> BaseCollectionsAssembler::GetAddFunction(
return add_func;
}
+TNode<JSFunction> BaseCollectionsAssembler::GetConstructor(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::JS_MAP_FUN_INDEX;
+ break;
+ case kSet:
+ index = Context::JS_SET_FUN_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::JS_WEAK_MAP_FUN_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::JS_WEAK_SET_FUN_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+TNode<JSFunction> BaseCollectionsAssembler::GetInitialAddFunction(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::MAP_SET_INDEX;
+ break;
+ case kSet:
+ index = Context::SET_ADD_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::WEAKMAP_SET_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::WEAKSET_ADD_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+int BaseCollectionsAssembler::GetTableOffset(Variant variant) {
+ switch (variant) {
+ case kMap:
+ return JSMap::kTableOffset;
+ case kSet:
+ return JSSet::kTableOffset;
+ case kWeakMap:
+ return JSWeakMap::kTableOffset;
+ case kWeakSet:
+ return JSWeakSet::kTableOffset;
+ }
+ UNREACHABLE();
+}
+
TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
TNode<Object> initial_entries, TNode<BoolT> is_fast_jsarray) {
return Select<IntPtrT>(
@@ -362,6 +465,31 @@ void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
GotoIfNot(IsJSReceiver(obj), if_not_receiver);
}
+TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
+ Variant variant, TNode<Context> native_context, TNode<Object> collection) {
+ int initial_prototype_index;
+ switch (variant) {
+ case kMap:
+ initial_prototype_index = Context::INITIAL_MAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kSet:
+ initial_prototype_index = Context::INITIAL_SET_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakMap:
+ initial_prototype_index = Context::INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakSet:
+ initial_prototype_index = Context::INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX;
+ break;
+ }
+ TNode<Map> initial_prototype_map =
+ CAST(LoadContextElement(native_context, initial_prototype_index));
+ TNode<Map> collection_proto_map =
+ LoadMap(CAST(LoadMapPrototype(LoadMap(CAST(collection)))));
+
+ return WordEqual(collection_proto_map, initial_prototype_map);
+}
+
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<Object> elements, TNode<IntPtrT> index) {
TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
@@ -386,15 +514,13 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
Goto(&next);
}
BIND(&next);
- return entry;
+ return entry.value();
}
-void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
- TNode<Object> maybe_array,
- TVariable<Object>* key,
- TVariable<Object>* value,
- Label* if_exception,
- TVariable<Object>* var_exception) {
+void BaseCollectionsAssembler::LoadKeyValue(
+ TNode<Context> context, TNode<Object> maybe_array, TVariable<Object>* key,
+ TVariable<Object>* value, Label* if_may_have_side_effects,
+ Label* if_exception, TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(maybe_array)));
Label exit(this), if_fast(this), if_slow(this, Label::kDeferred);
@@ -461,20 +587,31 @@ void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
}
BIND(&if_slow);
{
- *key = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->zero_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*key, if_exception, var_exception);
- }
+ Label if_notobject(this, Label::kDeferred);
+ GotoIfNotJSReceiver(maybe_array, &if_notobject);
+ if (if_may_have_side_effects != nullptr) {
+ // If the element is not a fast array, we cannot guarantee accessing the
+ // key and value won't execute user code that will break fast path
+ // assumptions.
+ Goto(if_may_have_side_effects);
+ } else {
+ *key = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->zero_string()));
+ GotoIfException(key->value(), if_exception, var_exception);
- *value = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->one_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*value, if_exception, var_exception);
+ *value = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->one_string()));
+ GotoIfException(value->value(), if_exception, var_exception);
+ Goto(&exit);
+ }
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIteratorValueNotAnObject), maybe_array);
+ GotoIfException(ret, if_exception, var_exception);
+ Unreachable();
}
- Goto(&exit);
}
BIND(&exit);
}
@@ -672,18 +809,17 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
Variant variant, TNode<Context> context,
TNode<IntPtrT> at_least_space_for) {
- return CAST(variant == kMap ? AllocateOrderedHashTable<OrderedHashMap>()
- : AllocateOrderedHashTable<OrderedHashSet>());
+ return CAST((variant == kMap || variant == kWeakMap)
+ ? AllocateOrderedHashTable<OrderedHashMap>()
+ : AllocateOrderedHashTable<OrderedHashSet>());
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_MAP_FUN_INDEX,
- isolate()->factory()->Map_string(), JSMap::kTableOffset);
+ GenerateConstructor(kMap, isolate()->factory()->Map_string());
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_SET_FUN_INDEX,
- isolate()->factory()->Set_string(), JSSet::kTableOffset);
+ GenerateConstructor(kSet, isolate()->factory()->Set_string());
}
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
@@ -1049,9 +1185,9 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table.Bind(next_table);
- var_index.Bind(
- SmiUntag(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index))));
+ var_index.Bind(SmiUntag(
+ CAST(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
+ NoContextConstant(), table, SmiTag(index)))));
Goto(&loop);
}
BIND(&done_loop);
@@ -1624,7 +1760,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -1837,7 +1974,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
InstanceTypeEqual(receiver_instance_type, JS_SET_KEY_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -2019,7 +2157,7 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
// See HashTableBase::ElementAdded().
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
@@ -2043,7 +2181,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kCapacityIndex,
- SmiFromWord(capacity), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(capacity), SKIP_WRITE_BARRIER);
TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length,
@@ -2083,16 +2221,15 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
BIND(&loop);
TNode<IntPtrT> key_index;
{
- key_index = KeyIndexFromEntry(var_entry);
+ key_index = KeyIndexFromEntry(var_entry.value());
TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
key_compare(entry_key, &if_found);
// See HashTable::NextProbe().
Increment(&var_count);
- var_entry = WordAnd(IntPtrAdd(UncheckedCast<IntPtrT>(var_entry),
- UncheckedCast<IntPtrT>(var_count)),
- entry_mask);
+ var_entry =
+ WordAnd(IntPtrAdd(var_entry.value(), var_count.value()), entry_mask);
Goto(&loop);
}
@@ -2186,9 +2323,9 @@ void WeakCollectionsBuiltinsAssembler::RemoveEntry(
// See HashTableBase::ElementRemoved().
TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table, 1);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
- SmiFromWord(number_of_deleted), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_deleted), SKIP_WRITE_BARRIER);
}
TNode<BoolT> WeakCollectionsBuiltinsAssembler::ShouldRehash(
@@ -2222,15 +2359,11 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_WEAK_MAP_FUN_INDEX,
- isolate()->factory()->WeakMap_string(),
- JSWeakMap::kTableOffset);
+ GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string());
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_WEAK_SET_FUN_INDEX,
- isolate()->factory()->WeakSet_string(),
- JSWeakSet::kTableOffset);
+ GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string());
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
@@ -2342,8 +2475,8 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
TNode<IntPtrT> entry_mask = EntryMask(capacity);
TVARIABLE(IntPtrT, var_hash, LoadJSReceiverIdentityHash(key, &if_no_hash));
- TNode<IntPtrT> key_index =
- FindKeyIndexForKey(table, key, var_hash, entry_mask, &if_not_found);
+ TNode<IntPtrT> key_index = FindKeyIndexForKey(table, key, var_hash.value(),
+ entry_mask, &if_not_found);
StoreFixedArrayElement(table, ValueIndexFromKeyIndex(key_index), value);
Return(collection);
@@ -2365,14 +2498,14 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
&call_runtime);
TNode<IntPtrT> insertion_key_index =
- FindKeyIndexForInsertion(table, var_hash, entry_mask);
+ FindKeyIndexForInsertion(table, var_hash.value(), entry_mask);
AddEntry(table, insertion_key_index, key, value, number_of_elements);
Return(collection);
}
BIND(&call_runtime);
{
CallRuntime(Runtime::kWeakCollectionSet, context, collection, key, value,
- SmiTag(var_hash));
+ SmiTag(var_hash.value()));
Return(collection);
}
}
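
Note: the if_may_have_side_effects bailout added above keeps the fast-array loop sound: for Map/WeakMap, loading entry[0]/entry[1] can execute user code unless the entry is itself a plain fast array. A TypeScript illustration of the two paths (observable behavior only):

const plain: Array<[string, number]> = [["a", 1], ["b", 2]];
const m1 = new Map(plain); // plain fast-array entries: the fast loop applies

// An entry with accessors runs user code when its key/value are loaded, so
// construction must restart on the generic iterator-protocol path:
const tricky = {
  get 0() { console.log("getter ran"); return "k"; },
  get 1() { return "v"; },
};
const m2 = new Map([tricky as unknown as [string, string]]);
console.log(m1.size, m2.get("k")); // 2 "v"
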
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 5c3883a870..945fb4394b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -55,21 +55,54 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
typedef compiler::Node Node;
-Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
- Node* feedback_vector,
- Node* slot,
- Node* context) {
- Isolate* isolate = this->isolate();
- Factory* factory = isolate->factory();
- IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
- Node* compiler_hints =
- LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
+Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
+ return TaggedIsSmi(literal_site);
+}
+
+Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
+ CSA_ASSERT(this, IsAllocationSite(site));
+ return LoadObjectField(site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+ Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
+ Node* feedback_cell = Parameter(Descriptor::kFeedbackCell);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info));
+
+ IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
+
+  // Bump the closure counter encoded in the {feedback_cell}'s map.
+ {
+ Node* const feedback_cell_map = LoadMap(feedback_cell);
+ Label no_closures(this), one_closure(this), cell_done(this);
+
+ GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
+ GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
+ CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
+ feedback_cell_map, feedback_cell);
+ Goto(&cell_done);
+
+ BIND(&no_closures);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kOneClosureCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&one_closure);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kManyClosuresCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&cell_done);
+ }
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* function_map_index =
+ Node* const compiler_hints = LoadObjectField(
+ shared_function_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* const function_map_index =
IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
compiler_hints),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
@@ -79,24 +112,24 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Get the function map in the current native context and set that
// as the map of the allocated object.
- Node* native_context = LoadNativeContext(context);
- Node* function_map = LoadContextElement(native_context, function_map_index);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const function_map =
+ LoadContextElement(native_context, function_map_index);
// Create a new closure from the given function info in new space
Node* instance_size_in_bytes =
TimesPointerSize(LoadMapInstanceSizeInWords(function_map));
- Node* result = Allocate(instance_size_in_bytes);
+ Node* const result = Allocate(instance_size_in_bytes);
StoreMapNoWriteBarrier(result, function_map);
InitializeJSObjectBodyNoSlackTracking(result, function_map,
instance_size_in_bytes,
JSFunction::kSizeWithoutPrototype);
// Initialize the rest of the function.
- Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOrHashOffset,
- empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
- empty_fixed_array);
+ StoreObjectFieldRoot(result, JSObject::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
{
// Set function prototype if necessary.
Label done(this), init_prototype(this);
@@ -104,65 +137,23 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
&done);
BIND(&init_prototype);
- StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ StoreObjectFieldRoot(result, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
Goto(&done);
-
BIND(&done);
}
- Node* literals_cell = LoadFeedbackVectorSlot(
- feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
- {
- // Bump the closure counter encoded in the cell's map.
- Node* cell_map = LoadMap(literals_cell);
- Label no_closures(this), one_closure(this), cell_done(this);
-
- GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
- GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
- CSA_ASSERT(this, IsManyClosuresCellMap(cell_map), cell_map, literals_cell,
- feedback_vector, slot);
- Goto(&cell_done);
-
- BIND(&no_closures);
- StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&one_closure);
- StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&cell_done);
- }
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
- literals_cell);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
+ feedback_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
- shared_info);
+ shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
Handle<Code> lazy_builtin_handle(
- isolate->builtins()->builtin(Builtins::kCompileLazy));
+ isolate()->builtins()->builtin(Builtins::kCompileLazy));
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
- return result;
-}
-
-Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
- return TaggedIsSmi(literal_site);
-}
-
-Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
- CSA_ASSERT(this, IsAllocationSite(site));
- return LoadObjectField(site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset);
-}
-
-TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
- Node* context = Parameter(FastNewClosureDescriptor::kContext);
- Node* vector = Parameter(FastNewClosureDescriptor::kVector);
- Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
- Return(EmitFastNewClosure(shared, vector, slot, context));
+ Return(result);
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
@@ -418,7 +409,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
BIND(&create_empty_array);
CSA_ASSERT(this, IsAllocationSite(allocation_site.value()));
- Node* kind = SmiToWord32(CAST(
+ Node* kind = SmiToInt32(CAST(
LoadObjectField(allocation_site.value(),
AllocationSite::kTransitionInfoOrBoilerplateOffset)));
CSA_ASSERT(this, IsFastElementsKind(kind));
@@ -662,7 +653,7 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
@@ -687,7 +678,7 @@ TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitFastNewObject(context, target, new_target));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
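
Note: the ObjectConstructor hunks above call the CSA ToObject helper directly instead of going through Builtins::kToObject; the observable result is unchanged. A reminder of what that ToObject step produces (TypeScript sketch):

console.log(typeof Object(42));     // "object" (a Number wrapper is allocated)
const o = { x: 1 };
console.log(Object(o) === o);       // true (ToObject is the identity on objects)
console.log(Object(null) !== null); // true (null/undefined yield a fresh empty object)
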
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index ac13dcbb6d..f6d71882bc 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -15,8 +15,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
- Node* context);
Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
ScopeType scope_type);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 98e0f2c8b2..dc3e8d53c4 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -62,7 +62,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
BIND(&if_resultisnotprimitive);
{
// Somehow the @@toPrimitive method on {input} didn't yield a primitive.
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
}
}
@@ -99,7 +99,7 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
}
TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<String> input = CAST(Parameter(Descriptor::kArgument));
Return(StringToNumber(input));
}
@@ -144,7 +144,7 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Number> input = CAST(Parameter(Descriptor::kArgument));
Return(NumberToString(input));
}
@@ -208,7 +208,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
BIND(&if_methodisnotcallable);
}
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
BIND(&return_result);
Return(var_result.value());
@@ -383,20 +383,13 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Return(js_value);
BIND(&if_noconstructor);
- TailCallRuntime(Runtime::kThrowUndefinedOrNullToObject, context,
- StringConstant("ToObject"));
+ ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
+ "ToObject");
BIND(&if_jsreceiver);
Return(object);
}
-// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
-TF_BUILTIN(ClassOf, CodeStubAssembler) {
- Node* object = Parameter(TypeofDescriptor::kObject);
-
- Return(ClassOf(object));
-}
-
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
Node* object = Parameter(TypeofDescriptor::kObject);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index df7058d377..38b3d90649 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -157,6 +157,21 @@ void FlipBytes(uint8_t* target, uint8_t const* source) {
}
}
+template <typename T>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, T value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
template <typename T>
MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
@@ -196,50 +211,78 @@ MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
} else {
CopyBytes<sizeof(T)>(v.bytes, source);
}
- return isolate->factory()->NewNumber(v.data);
+ return AllocateResult<T>(isolate, v.data);
+}
+
+template <typename T>
+MaybeHandle<Object> DataViewConvertInput(Isolate* isolate,
+ Handle<Object> input) {
+ return Object::ToNumber(input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<int64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<uint64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
}
template <typename T>
-T DataViewConvertValue(double value);
+T DataViewConvertValue(Handle<Object> value);
+
+template <>
+int8_t DataViewConvertValue<int8_t>(Handle<Object> value) {
+ return static_cast<int8_t>(DoubleToInt32(value->Number()));
+}
+
+template <>
+int16_t DataViewConvertValue<int16_t>(Handle<Object> value) {
+ return static_cast<int16_t>(DoubleToInt32(value->Number()));
+}
template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
- return static_cast<int8_t>(DoubleToInt32(value));
+int32_t DataViewConvertValue<int32_t>(Handle<Object> value) {
+ return DoubleToInt32(value->Number());
}
template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
- return static_cast<int16_t>(DoubleToInt32(value));
+uint8_t DataViewConvertValue<uint8_t>(Handle<Object> value) {
+ return static_cast<uint8_t>(DoubleToUint32(value->Number()));
}
template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
- return DoubleToInt32(value);
+uint16_t DataViewConvertValue<uint16_t>(Handle<Object> value) {
+ return static_cast<uint16_t>(DoubleToUint32(value->Number()));
}
template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
- return static_cast<uint8_t>(DoubleToUint32(value));
+uint32_t DataViewConvertValue<uint32_t>(Handle<Object> value) {
+ return DoubleToUint32(value->Number());
}
template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
- return static_cast<uint16_t>(DoubleToUint32(value));
+float DataViewConvertValue<float>(Handle<Object> value) {
+ return static_cast<float>(value->Number());
}
template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
- return DoubleToUint32(value);
+double DataViewConvertValue<double>(Handle<Object> value) {
+ return value->Number();
}
template <>
-float DataViewConvertValue<float>(double value) {
- return static_cast<float>(value);
+int64_t DataViewConvertValue<int64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsInt64();
}
template <>
-double DataViewConvertValue<double>(double value) {
- return value;
+uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsUint64();
}
// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
@@ -253,7 +296,8 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Object::ToIndex(isolate, request_index,
MessageTemplate::kInvalidDataViewAccessorOffset),
Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::ToNumber(value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ DataViewConvertInput<T>(isolate, value), Object);
size_t get_index = 0;
if (!TryNumberToSize(*request_index, &get_index)) {
THROW_NEW_ERROR(
@@ -274,7 +318,7 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
T data;
uint8_t bytes[sizeof(T)];
} v;
- v.data = DataViewConvertValue<T>(value->Number());
+ v.data = DataViewConvertValue<T>(value);
size_t const buffer_offset = data_view_byte_offset + get_index;
DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
uint8_t* const target =
@@ -310,6 +354,8 @@ DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_GET(Float32, float)
DATA_VIEW_PROTOTYPE_GET(Float64, double)
+DATA_VIEW_PROTOTYPE_GET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_GET
#define DATA_VIEW_PROTOTYPE_SET(Type, type) \
@@ -334,6 +380,8 @@ DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_SET(Float32, float)
DATA_VIEW_PROTOTYPE_SET(Float64, double)
+DATA_VIEW_PROTOTYPE_SET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_SET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_SET
} // namespace internal
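
Two patterns in the DataView changes above are worth isolating: the byte-copy trick GetViewValue and SetViewValue use to reinterpret raw buffer bytes with optional endianness flipping, and the template specializations that route the new 64-bit integer types through BigInt conversions while every other type stays on the Number path. A self-contained sketch of both ideas using plain host types only (all names hypothetical, not V8 API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Read a T out of a byte buffer, reversing byte order when the requested
// endianness differs from the host's -- the job FlipBytes/CopyBytes do
// for GetViewValue.
template <typename T>
T ReadViewValue(const uint8_t* source, bool flip) {
  uint8_t bytes[sizeof(T)];
  if (flip) {
    for (std::size_t i = 0; i < sizeof(T); ++i) {
      bytes[i] = source[sizeof(T) - 1 - i];
    }
  } else {
    std::memcpy(bytes, source, sizeof(T));
  }
  T data;
  std::memcpy(&data, bytes, sizeof(T));
  return data;
}

// Specialization-based dispatch in the spirit of DataViewConvertInput:
// the generic template stands for the Number path, while the 64-bit
// integer specializations stand for the BigInt path.
template <typename T> const char* ConversionFor() { return "Object::ToNumber"; }
template <> const char* ConversionFor<int64_t>() { return "BigInt::FromObject"; }
template <> const char* ConversionFor<uint64_t>() { return "BigInt::FromObject"; }

int main() {
  const uint8_t buffer[4] = {0x12, 0x34, 0x56, 0x78};
  std::printf("as-is:   0x%08x\n", ReadViewValue<uint32_t>(buffer, false));
  std::printf("flipped: 0x%08x\n", ReadViewValue<uint32_t>(buffer, true));
  std::printf("double uses %s, int64_t uses %s\n", ConversionFor<double>(),
              ConversionFor<int64_t>());
}
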
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index f6f3563d55..8b58c1ec80 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -61,10 +61,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
// Raise a TypeError if the receiver is not a date.
BIND(&receiver_not_date);
- {
- CallRuntime(Runtime::kThrowNotDateError, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotDateObject); }
}
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
@@ -240,17 +237,14 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Raise a TypeError if the {hint} is invalid.
BIND(&hint_is_invalid);
- {
- CallRuntime(Runtime::kThrowInvalidHint, context, hint);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kInvalidHint, hint); }
// Raise a TypeError if the {receiver} is not a JSReceiver instance.
BIND(&receiver_is_invalid);
{
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("Date.prototype [ @@toPrimitive ]"), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("Date.prototype [ @@toPrimitive ]"),
+ receiver);
}
}
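
Several hunks in this change replace a two-step "CallRuntime(kThrow...); Unreachable();" sequence with a single ThrowTypeError helper that is known not to return. A rough standalone sketch of that consolidation, assuming an abort-based stand-in for the never-returning runtime throw:

#include <cstdio>
#include <cstdlib>

// A [[noreturn]] helper folding the old "report error, then mark
// unreachable" pair into one call. Purely illustrative; not V8's
// implementation.
[[noreturn]] void ThrowTypeError(const char* message) {
  std::fprintf(stderr, "TypeError: %s\n", message);
  std::abort();  // stands in for the non-returning runtime throw
}

int main() {
  const bool receiver_is_date = false;
  if (!receiver_is_date) ThrowTypeError("this is not a Date object");
}
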
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index a4a0bb9e2c..bf5b9086aa 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -70,7 +70,7 @@ namespace internal {
ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
TFC(FastNewObject, FastNewObject, 1) \
- TFC(FastNewClosure, FastNewClosure, 1) \
+ TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
@@ -92,8 +92,8 @@ namespace internal {
\
/* String helpers */ \
TFC(StringCharAt, StringAt, 1) \
- TFC(StringCharCodeAt, StringAt, 1) \
- TFC(StringCodePointAt, StringAt, 1) \
+ TFC(StringCodePointAtUTF16, StringAt, 1) \
+ TFC(StringCodePointAtUTF32, StringAt, 1) \
TFC(StringEqual, Compare, 1) \
TFC(StringGreaterThan, Compare, 1) \
TFC(StringGreaterThanOrEqual, Compare, 1) \
@@ -101,7 +101,7 @@ namespace internal {
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
TFS(StringRepeat, kString, kCount) \
- TFS(SubString, kString, kFrom, kTo) \
+ TFC(StringSubstring, StringSubstring, 1) \
\
/* OrderedHashTable helpers */ \
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
@@ -193,7 +193,6 @@ namespace internal {
TFC(ToInteger, TypeConversion, 1) \
TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
- TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
\
@@ -216,14 +215,9 @@ namespace internal {
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
- /* Promise helpers */ \
- TFS(ResolveNativePromise, kPromise, kValue) \
- TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \
- TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \
- kResultPromise) \
+ /* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
TFC(RunMicrotasks, RunMicrotasks, 1) \
- TFS(PromiseResolveThenableJob, kMicrotask) \
\
/* Object property helpers */ \
TFS(HasProperty, kKey, kObject) \
@@ -247,6 +241,10 @@ namespace internal {
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kArg) \
+ /* ES6 #sec-array.from */ \
+ TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.of */ \
+ TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
@@ -313,6 +311,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -321,6 +320,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -365,17 +365,16 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
+ TFC(AsyncFunctionAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncFunctionAwaitReject, PromiseReactionHandler, 1) \
+ TFS(AsyncFunctionAwaitCaught, kGenerator, kValue, kOuterPromise) \
+ TFS(AsyncFunctionAwaitUncaught, kGenerator, kValue, kOuterPromise) \
TFJ(AsyncFunctionPromiseCreate, 0) \
TFJ(AsyncFunctionPromiseRelease, 1, kPromise) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
CPP(BigIntConstructor_ConstructStub) \
- CPP(BigIntParseInt) \
CPP(BigIntAsUintN) \
CPP(BigIntAsIntN) \
CPP(BigIntPrototypeToLocaleString) \
@@ -457,6 +456,10 @@ namespace internal {
CPP(DataViewPrototypeSetFloat32) \
CPP(DataViewPrototypeGetFloat64) \
CPP(DataViewPrototypeSetFloat64) \
+ CPP(DataViewPrototypeGetBigInt64) \
+ CPP(DataViewPrototypeSetBigInt64) \
+ CPP(DataViewPrototypeGetBigUint64) \
+ CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
CPP(DateConstructor) \
@@ -755,7 +758,7 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
+ TFJ(ObjectEntries, 1, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -785,7 +788,7 @@ namespace internal {
/* ES #sec-object.prototype.tolocalestring */ \
TFJ(ObjectPrototypeToLocaleString, 0) \
CPP(ObjectSeal) \
- CPP(ObjectValues) \
+ TFJ(ObjectValues, 1, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -796,36 +799,42 @@ namespace internal {
TFS(ForInFilter, kKey, kObject) \
\
/* Promise */ \
+ /* ES #sec-fulfillpromise */ \
+ TFS(FulfillPromise, kPromise, kValue) \
+ /* ES #sec-rejectpromise */ \
+ TFS(RejectPromise, kPromise, kReason, kDebugEvent) \
+ /* ES #sec-promise-resolve-functions */ \
+ /* Starting at step 6 of "Promise Resolve Functions" */ \
+ TFS(ResolvePromise, kPromise, kResolution) \
+ /* ES #sec-promise-reject-functions */ \
+ TFJ(PromiseCapabilityDefaultReject, 1, kReason) \
+ /* ES #sec-promise-resolve-functions */ \
+ TFJ(PromiseCapabilityDefaultResolve, 1, kResolution) \
/* ES6 #sec-getcapabilitiesexecutor-functions */ \
TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
- TFJ(NewPromiseCapability, 2, kConstructor, kDebugEvent) \
+ TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 2, kPromise, kResult) \
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
- TFJ(PromiseInternalConstructor, 1, kParent) \
CPP(IsPromise) \
- /* ES #sec-promise-resolve-functions */ \
- TFJ(PromiseResolveClosure, 1, kValue) \
- /* ES #sec-promise-reject-functions */ \
- TFJ(PromiseRejectClosure, 1, kValue) \
- TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kOnFulfilled, kOnRejected) \
+ /* ES #sec-performpromisethen */ \
+ TFS(PerformPromiseThen, kPromise, kOnFulfilled, kOnRejected, kResultPromise) \
/* ES #sec-promise.prototype.catch */ \
TFJ(PromisePrototypeCatch, 1, kOnRejected) \
- /* ES #sec-fulfillpromise */ \
- TFJ(ResolvePromise, 2, kPromise, kValue) \
- TFS(PromiseHandleReject, kPromise, kOnReject, kException) \
- TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \
- kDeferredOnReject) \
- TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \
- kDeferredOnResolve, kDeferredOnReject) \
+ /* ES #sec-promisereactionjob */ \
+ TFS(PromiseRejectReactionJob, kReason, kHandler, kPayload) \
+ TFS(PromiseFulfillReactionJob, kValue, kHandler, kPayload) \
+ /* ES #sec-promiseresolvethenablejob */ \
+ TFS(PromiseResolveThenableJob, kPromiseToResolve, kThenable, kThen) \
/* ES #sec-promise.resolve */ \
- TFJ(PromiseResolveWrapper, 1, kValue) \
+ TFJ(PromiseResolveTrampoline, 1, kValue) \
+ /* ES #sec-promise-resolve */ \
TFS(PromiseResolve, kConstructor, kValue) \
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
- TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
TFJ(PromisePrototypeFinally, 1, kOnFinally) \
TFJ(PromiseThenFinally, 1, kValue) \
TFJ(PromiseCatchFinally, 1, kReason) \
@@ -833,8 +842,15 @@ namespace internal {
TFJ(PromiseThrowerFinally, 0) \
/* ES #sec-promise.all */ \
TFJ(PromiseAll, 1, kIterable) \
+ TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.race */ \
TFJ(PromiseRace, 1, kIterable) \
+ /* V8 Extras: v8.createPromise(parent) */ \
+ TFJ(PromiseInternalConstructor, 1, kParent) \
+ /* V8 Extras: v8.rejectPromise(promise, reason) */ \
+ TFJ(PromiseInternalReject, 2, kPromise, kReason) \
+ /* V8 Extras: v8.resolvePromise(promise, resolution) */ \
+ TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
\
/* Proxy */ \
TFJ(ProxyConstructor, 0) \
@@ -1032,9 +1048,8 @@ namespace internal {
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimLeft, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimRight, \
+ TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
TFJ(StringPrototypeValueOf, 0) \
@@ -1062,16 +1077,13 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0) \
\
/* TypedArray */ \
+ TFS(IterableToList, kIterable, kIteratorFn) \
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
kByteOffset) \
- /* ES6 #sec-typedarray-buffer-byteoffset-length */ \
- TFJ(TypedArrayConstructByArrayBuffer, 5, kHolder, kBuffer, kByteOffset, \
- kLength, kElementSize) \
- TFJ(TypedArrayConstructByArrayLike, 4, kHolder, kArrayLike, kLength, \
- kElementSize) \
- /* ES6 #sec-typedarray-length */ \
- TFJ(TypedArrayConstructByLength, 3, kHolder, kLength, kElementSize) \
+ TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -1089,6 +1101,9 @@ namespace internal {
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
CPP(TypedArrayPrototypeFill) \
+ /* ES6 #sec-%typedarray%.prototype.filter */ \
+ TFJ(TypedArrayPrototypeFilter, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.find */ \
TFJ(TypedArrayPrototypeFind, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1106,7 +1121,11 @@ namespace internal {
/* ES6 %TypedArray%.prototype.set */ \
TFJ(TypedArrayPrototypeSet, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-%typedarray%.prototype.slice */ \
- CPP(TypedArrayPrototypeSlice) \
+ TFJ(TypedArrayPrototypeSlice, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.subarray */ \
+ TFJ(TypedArrayPrototypeSubArray, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0) \
/* ES6 %TypedArray%.prototype.every */ \
@@ -1126,6 +1145,10 @@ namespace internal {
/* ES6 %TypedArray%.prototype.forEach */ \
TFJ(TypedArrayPrototypeForEach, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.of */ \
+ TFJ(TypedArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.from */ \
+ TFJ(TypedArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -1159,6 +1182,17 @@ namespace internal {
\
/* AsyncGenerator */ \
\
+ /* Await (proposal-async-iteration/#await), with resume behaviour */ \
+ /* specific to Async Generators. Internal / Not exposed to JS code. */ \
+ TFS(AsyncGeneratorAwaitCaught, kGenerator, kValue) \
+ TFS(AsyncGeneratorAwaitUncaught, kGenerator, kValue) \
+ TFC(AsyncGeneratorAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorAwaitReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorYieldFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnFulfill, PromiseReactionHandler, 1) \
+ \
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
TFS(AsyncGeneratorReject, kGenerator, kValue) \
TFS(AsyncGeneratorYield, kGenerator, kValue, kIsCaught) \
@@ -1181,17 +1215,6 @@ namespace internal {
TFJ(AsyncGeneratorPrototypeThrow, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
- /* Await (proposal-async-iteration/#await), with resume behaviour */ \
- /* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kValue) \
- \
/* Async-from-Sync Iterator */ \
\
/* %AsyncFromSyncIteratorPrototype% */ \
@@ -1240,25 +1263,16 @@ namespace internal {
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
V(AsyncFromSyncIteratorPrototypeThrow) \
- V(AsyncFunctionAwaitCaught) \
- V(AsyncFunctionAwaitUncaught) \
V(AsyncGeneratorResolve) \
- V(AsyncGeneratorAwaitCaught) \
- V(AsyncGeneratorAwaitUncaught) \
- V(PerformNativePromiseThen) \
V(PromiseAll) \
V(PromiseConstructor) \
- V(PromiseHandle) \
+ V(PromiseFulfillReactionJob) \
V(PromiseRace) \
- V(PromiseResolve) \
- V(PromiseResolveClosure) \
- V(RejectNativePromise) \
- V(ResolveNativePromise) \
V(ResolvePromise)
// The exception thrown in the following builtins are caught internally and will
// not be propagated further or re-thrown
-#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseHandleReject)
+#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseRejectReactionJob)
#define IGNORE_BUILTIN(...)
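
builtins-definitions.h is an X-macro list: each TFJ/TFS/TFC/CPP entry is expanded differently by different clients (enum generation, code generation, or prediction lists such as BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST above). A minimal sketch of the technique with a hypothetical two-entry-kind list:

#include <cstdio>

// A two-column X-macro list in the style of BUILTIN_LIST: CPP entries
// name C++ builtins, TFJ entries carry a JS argument count. Different
// expansions of the same list generate different code.
#define DEMO_BUILTIN_LIST(CPP, TFJ) \
  CPP(ArrayConcat)                  \
  TFJ(ArrayIsArray, 1)              \
  TFJ(PromiseReject, 1)

#define IGNORE_BUILTIN(...)

// Expansion 1: an enum of builtin ids covering every entry.
#define AS_ENUM(Name, ...) k##Name,
enum class DemoBuiltin { DEMO_BUILTIN_LIST(AS_ENUM, AS_ENUM) kCount };
#undef AS_ENUM

// Expansion 2: visit only the TFJ entries, ignoring the rest.
#define PRINT_TFJ(Name, Argc) std::printf(#Name " (argc=%d)\n", Argc);

int main() {
  DEMO_BUILTIN_LIST(IGNORE_BUILTIN, PRINT_TFJ)
  std::printf("total builtins: %d\n", static_cast<int>(DemoBuiltin::kCount));
}
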
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 771c7243ac..cc6d237af6 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -288,26 +288,22 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
-// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
-// can tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_FunctionBind) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
- BuiltinArguments caller_args(argc, incoming->arguments() + 1);
- return DoFunctionBind(isolate, caller_args);
-}
-
// ES6 section 19.2.3.5 Function.prototype.toString ( )
BUILTIN(FunctionPrototypeToString) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (receiver->IsJSBoundFunction()) {
return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
- } else if (receiver->IsJSFunction()) {
+ }
+ if (receiver->IsJSFunction()) {
return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
}
+ // With the revised toString behavior, all callable objects are valid
+ // receivers for this method.
+ if (FLAG_harmony_function_tostring && receiver->IsJSReceiver() &&
+ JSReceiver::cast(*receiver)->map()->is_callable()) {
+ return isolate->heap()->function_native_code_string();
+ }
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b063b314b5..07a56c86ed 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -84,9 +84,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
BIND(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
BIND(&if_receiverisclosed);
@@ -110,10 +109,7 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
}
BIND(&if_receiverisrunning);
- {
- CallRuntime(Runtime::kThrowGeneratorRunning, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kGeneratorRunning); }
BIND(&if_exception);
{
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bb4b66e3a4..edc529c798 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -73,8 +73,8 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
- Node* length = SmiToWord(Parameter(Descriptor::kLength));
- Node* mapped_count = SmiToWord(Parameter(Descriptor::kMappedCount));
+ Node* length = SmiToIntPtr(Parameter(Descriptor::kLength));
+ Node* mapped_count = SmiToIntPtr(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -164,8 +164,8 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
{
// Allocate in old space (or large object space).
TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
- BitcastWordToTagged(frame), SmiFromWord(length),
- SmiFromWord(mapped_count));
+ BitcastWordToTagged(frame), SmiFromIntPtr(length),
+ SmiFromIntPtr(mapped_count));
}
}
@@ -202,7 +202,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
Node* bits = Load(MachineType::Int32(), cell);
Node* bit_0 = Word32And(bits, mask);
@@ -239,7 +239,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
// Non-white has 1 for the first bit, so we only need to check for the first
// bit.
return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
@@ -628,6 +628,9 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> context);
void LeaveMicrotaskContext();
+ void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload);
+
TNode<Object> GetPendingException() {
auto ref = ExternalReference(kPendingExceptionAddress, isolate());
return TNode<Object>::UncheckedCast(
@@ -745,6 +748,19 @@ void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
}
}
+void InternalBuiltinsAssembler::RunPromiseHook(
+ Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActive(), &hook, &done_hook);
+ BIND(&hook);
+ {
+ CallRuntime(id, context, payload);
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
Node* microtask = Parameter(Descriptor::kMicrotask);
@@ -812,13 +828,15 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
}
TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
- Label init_queue_loop(this);
+ // Load the current context from the isolate.
+ TNode<Context> current_context = GetCurrentContext();
+ Label init_queue_loop(this);
Goto(&init_queue_loop);
BIND(&init_queue_loop);
{
TVARIABLE(IntPtrT, index, IntPtrConstant(0));
- Label loop(this, &index);
+ Label loop(this, &index), loop_next(this);
TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
@@ -830,222 +848,193 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
SetPendingMicrotaskCount(IntPtrConstant(0));
- SetMicrotaskQueue(
- TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant()));
+ SetMicrotaskQueue(EmptyFixedArrayConstant());
Goto(&loop);
BIND(&loop);
{
- TNode<HeapObject> microtask =
- TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index));
- index = IntPtrAdd(index, IntPtrConstant(1));
+ TNode<HeapObject> microtask = TNode<HeapObject>::UncheckedCast(
+ LoadFixedArrayElement(queue, index.value()));
+ index = IntPtrAdd(index.value(), IntPtrConstant(1));
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
- Label is_call_handler_info(this);
- Label is_function(this);
- Label is_promise_resolve_thenable_job(this);
- Label is_promise_reaction_job(this);
- Label is_unreachable(this);
-
- int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo
- JS_FUNCTION_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
- PROMISE_REACTION_JOB_INFO_TYPE};
-
- Label* case_labels[] = {&is_call_handler_info, &is_function,
- &is_promise_resolve_thenable_job,
- &is_promise_reaction_job};
-
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Label if_exception(this, Label::kDeferred);
+ Label is_callable(this), is_callback(this),
+ is_promise_fulfill_reaction_job(this),
+ is_promise_reject_reaction_job(this),
+ is_promise_resolve_thenable_job(this),
+ is_unreachable(this, Label::kDeferred);
+
+ int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
+ Label* case_labels[] = {
+ &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
+ &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
static_assert(arraysize(case_values) == arraysize(case_labels), "");
Switch(microtask_type, &is_unreachable, case_values, case_labels,
arraysize(case_labels));
- BIND(&is_call_handler_info);
+ BIND(&is_callable);
{
- // Bailout to C++ slow path for the remainder of the loop.
- auto index_ref =
- ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate());
- auto count_ref =
- ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate());
- auto rep = kIntSize == 4 ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
-
- // index was pre-incremented, decrement for bailout to C++.
- Node* value = IntPtrSub(index, IntPtrConstant(1));
-
- if (kPointerSize == 4) {
- DCHECK_EQ(kIntSize, 4);
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks);
- } else {
- Node* count = num_tasks;
- if (kIntSize == 4) {
- value = TruncateInt64ToInt32(value);
- count = TruncateInt64ToInt32(count);
- }
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count);
- }
-
- Return(queue);
- }
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context =
+ LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
- BIND(&is_function);
- {
- Label cont(this);
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> fn_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, JSFunction::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(fn_context));
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
- EnterMicrotaskContext(fn_context);
- Node* const call = CallJS(CodeFactory::Call(isolate()), native_context,
- microtask, UndefinedConstant());
- GotoIfException(call, &cont);
- Goto(&cont);
- BIND(&cont);
+
+ TNode<JSReceiver> callable = LoadObjectField<JSReceiver>(
+ microtask, CallableTask::kCallableOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ microtask_context, callable, UndefinedConstant());
+ GotoIfException(result, &if_exception, &var_exception);
LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_callback);
+ {
+ Node* const microtask_callback =
+ LoadObjectField(microtask, CallbackTask::kCallbackOffset);
+ Node* const microtask_data =
+ LoadObjectField(microtask, CallbackTask::kDataOffset);
+
+ // If this turns out to become a bottleneck because of the calls
+ // to C++ via CEntryStub, we can choose to speed them up using a
+ // similar mechanism that we use for the CallApiFunction stub,
+ // except that calling the MicrotaskCallback is even easier, since
+ // it doesn't accept any tagged parameters, doesn't return a value
+ // and ignores exceptions.
+ //
+      // But from our current measurements it doesn't seem to be a
+      // serious performance problem, even if the microtask queue is
+      // full of CallHandlerTasks (which is not a realistic use case
+      // anyway).
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ Goto(&loop_next);
}
BIND(&is_promise_resolve_thenable_job);
{
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context =
- TNode<Context>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseResolveThenableJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
+
+ Node* const promise_to_resolve = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
+ Node* const then = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenOffset);
+ Node* const thenable = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ GotoIfException(result, &if_exception, &var_exception);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_promise_fulfill_reaction_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
- Label if_unhandled_exception(this), done(this);
- Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob,
- native_context, microtask);
- GotoIfException(ret, &if_unhandled_exception, &exception);
- Goto(&done);
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
- BIND(&if_unhandled_exception);
- CallRuntime(Runtime::kReportMessage, native_context, exception.value());
- Goto(&done);
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
- BIND(&done);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
- BIND(&is_promise_reaction_job);
+ BIND(&is_promise_reject_reaction_job);
{
- Label if_multiple(this);
- Label if_single(this);
-
- Node* const value =
- LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset);
- Node* const tasks =
- LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset);
- Node* const deferred_promises = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredPromiseOffset);
- Node* const deferred_on_resolves = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset);
- Node* const deferred_on_rejects = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset);
-
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
- SetCurrentContext(native_context);
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
+
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
+
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single);
-
- BIND(&if_single);
- {
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks,
- deferred_promises, deferred_on_resolves,
- deferred_on_rejects);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
-
- BIND(&if_multiple);
- {
- TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0));
- TNode<IntPtrT> inner_length =
- LoadAndUntagFixedArrayBaseLength(deferred_promises);
- Label inner_loop(this, &inner_index), done(this);
-
- CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0)));
- Goto(&inner_loop);
- BIND(&inner_loop);
- {
- Node* const task = LoadFixedArrayElement(tasks, inner_index);
- Node* const deferred_promise =
- LoadFixedArrayElement(deferred_promises, inner_index);
- Node* const deferred_on_resolve =
- LoadFixedArrayElement(deferred_on_resolves, inner_index);
- Node* const deferred_on_reject =
- LoadFixedArrayElement(deferred_on_rejects, inner_index);
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, task,
- deferred_promise, deferred_on_resolve,
- deferred_on_reject);
- inner_index = IntPtrAdd(inner_index, IntPtrConstant(1));
- Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop,
- &done);
- }
- BIND(&done);
-
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
-
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
BIND(&is_unreachable);
Unreachable();
- }
- }
-}
-TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) {
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- Callable call = CodeFactory::Call(isolate());
- Label reject_promise(this, Label::kDeferred);
- TNode<PromiseResolveThenableJobInfo> microtask =
- TNode<PromiseResolveThenableJobInfo>::UncheckedCast(
- Parameter(Descriptor::kMicrotask));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
-
- TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kThenableOffset));
- TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset));
- TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kResolveOffset));
- TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset));
-
- Node* const result = CallJS(call, context, then, thenable, resolve, reject);
- GotoIfException(result, &reject_promise, &exception);
- Return(UndefinedConstant());
+ BIND(&if_exception);
+ {
+ // Report unhandled exceptions from microtasks.
+ CallRuntime(Runtime::kReportMessage, current_context,
+ var_exception.value());
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
- BIND(&reject_promise);
- CallJS(call, context, reject, UndefinedConstant(), exception.value());
- Return(UndefinedConstant());
+ BIND(&loop_next);
+ Branch(IntPtrLessThan(index.value(), num_tasks), &loop, &init_queue_loop);
+ }
+ }
}
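
The rewritten RunMicrotasks loop above switches on each task's instance type (callable, callback, the two promise reaction jobs, thenable job), runs it in its own context, and reports per-task exceptions without aborting the drain. A standalone sketch of that dispatch-and-drain shape using std::variant, with hypothetical task types rather than V8's:

#include <cstdio>
#include <exception>
#include <functional>
#include <variant>
#include <vector>

// Hypothetical task kinds, loosely mirroring CallableTask, CallbackTask
// and the promise reaction job tasks dispatched on above.
struct CallableTask { std::function<void()> callable; };
struct CallbackTask { void (*callback)(void*); void* data; };
struct ReactionJobTask { std::function<void(int)> handler; int argument; };

using Microtask = std::variant<CallableTask, CallbackTask, ReactionJobTask>;

// Drain the queue, dispatching on each task's type. An exception thrown
// by one task is reported and does not stop the remaining tasks,
// matching the if_exception handling in the rewritten loop.
void RunMicrotasks(std::vector<Microtask>& queue) {
  while (!queue.empty()) {
    std::vector<Microtask> tasks;
    tasks.swap(queue);  // tasks enqueued while running go to the next round
    for (Microtask& task : tasks) {
      try {
        if (auto* t = std::get_if<CallableTask>(&task)) {
          t->callable();
        } else if (auto* t = std::get_if<CallbackTask>(&task)) {
          t->callback(t->data);
        } else if (auto* t = std::get_if<ReactionJobTask>(&task)) {
          t->handler(t->argument);
        }
      } catch (const std::exception& e) {
        std::fprintf(stderr, "unhandled exception in microtask: %s\n",
                     e.what());
      }
    }
  }
}

int main() {
  std::vector<Microtask> queue;
  queue.push_back(CallableTask{[] { std::puts("callable task"); }});
  queue.push_back(ReactionJobTask{
      [](int value) { std::printf("reaction job, argument=%d\n", value); },
      42});
  RunMicrotasks(queue);
}
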
TF_BUILTIN(AbortJS, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
index 8dda0c0898..419ff14db1 100644
--- a/deps/v8/src/builtins/builtins-intl.h
+++ b/deps/v8/src/builtins/builtins-intl.h
@@ -27,4 +27,4 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_H_
+#endif // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index f6a6d85880..21f6039f08 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -11,11 +11,24 @@ namespace internal {
using compiler::Node;
+Node* IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
+ Node* object) {
+ return GetProperty(context, object, factory()->iterator_symbol());
+}
+
IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Node* object,
Label* if_exception,
Variable* exception) {
- Node* method = GetProperty(context, object, factory()->iterator_symbol());
+ Node* method = GetIteratorMethod(context, object);
+ return GetIterator(context, object, method, if_exception, exception);
+}
+
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
+ Node* object,
+ Node* method,
+ Label* if_exception,
+ Variable* exception) {
GotoIfException(method, if_exception, exception);
Callable callable = CodeFactory::Call(isolate());
@@ -27,13 +40,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
BIND(&if_notobject);
- {
- Node* ret =
- CallRuntime(Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kNotAnIterator), iterator);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotAnIterator, iterator); }
BIND(&get_next);
Node* const next = GetProperty(context, iterator, factory()->next_string());
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 42627b8437..13464516d6 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -17,11 +17,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+ // Returns object[Symbol.iterator].
+ Node* GetIteratorMethod(Node* context, Node* object);
+
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
IteratorRecord GetIterator(Node* context, Node* object,
Label* if_exception = nullptr,
Variable* exception = nullptr);
+ IteratorRecord GetIterator(Node* context, Node* object, Node* method,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// Returns `false` if the iterator is done, otherwise returns an
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index d588113cdd..be58e8210e 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -268,7 +268,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
BIND(&if_xissmi);
{
- var_clz32_x.Bind(SmiToWord32(x));
+ var_clz32_x.Bind(SmiToInt32(x));
Goto(&do_clz32);
}
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 821dac9cc0..1340c33eb1 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -319,12 +319,14 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
&if_inputissigned32);
- // Check if the absolute {input} value is in the ]0.01,1e9[ range.
+ // Check if the absolute {input} value is in the [1,1<<31[ range.
+ // Take the generic path for the range [0,1[ because the result
+ // could be -0.
Node* input_value_abs = Float64Abs(input_value);
- GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)),
&if_generic);
- Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+ Branch(Float64LessThanOrEqual(Float64Constant(1), input_value_abs),
&if_inputissigned32, &if_generic);
// Return the truncated int32 value, and return the tagged result.
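
The reason the fast path above excludes absolute values below one: truncating such an input can yield -0, which a tagged Smi cannot represent, so the generic path must produce the heap-number result instead. A quick standalone illustration:

#include <cmath>
#include <cstdio>

int main() {
  // Truncating a double whose magnitude is below one can yield -0.0;
  // converting that to an int32 (a Smi) silently drops the sign.
  double input = -0.25;
  double truncated = std::trunc(input);        // -0.0
  int as_int32 = static_cast<int>(truncated);  // 0: the sign is lost
  std::printf("trunc=%g signbit=%d int32=%d\n", truncated,
              static_cast<int>(std::signbit(truncated)), as_int32);
}
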
@@ -904,8 +906,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
}
BIND(&dividend_is_not_zero);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ Node* untagged_divisor = SmiToInt32(divisor);
+ Node* untagged_dividend = SmiToInt32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -929,7 +931,7 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- Return(SmiFromWord32(untagged_result));
+ Return(SmiFromInt32(untagged_result));
// Bailout: convert {dividend} and {divisor} to double and do double
// division.
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 4cd012e6f0..1ebfbacf38 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -16,6 +16,8 @@ namespace internal {
// ES6 section 19.1 Object Objects
typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
Node* enumerable, Node* configurable);
Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+
+ Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
+};
+
+class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
+ public:
+ explicit ObjectEntriesValuesBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : ObjectBuiltinsAssembler(state) {}
+
+ protected:
+ enum CollectType { kEntries, kValues };
+
+ TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+
+ TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details);
+
+ TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind);
+
+ TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
+
+ TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
+
+ TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
+ return DecodeWord32<PropertyDetails::KindField>(details);
+ }
+
+ void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type);
+
+ void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
+ TNode<JSArray> FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type);
+
+ TNode<JSArray> FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> values_or_entries,
+ TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -97,6 +139,253 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
return js_desc;
}
+Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSReceiver> receiver = ToObject(context, maybe_object);
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(receiver);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<JSObject> object = CAST(receiver);
+ TNode<FixedArrayBase> elements = LoadElements(object);
+  // If the object has elements, we treat it as the slow case and go to
+  // the runtime call.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+    // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+    // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(
+ CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, receiver));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(
+ CallRuntime(Runtime::kObjectValuesSkipFastPath, context, receiver));
+ }
+ }
+}
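
GetOwnValuesOrEntries above is parameterized by a CollectType so a single body can produce either the values array or the [key, value] entry pairs. A standalone sketch of the same pattern over an ordinary map (hypothetical names, not V8 API):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

enum class CollectType { kEntries, kValues };

// One collection routine parameterized by CollectType, in the spirit of
// GetOwnValuesOrEntries: kValues yields values, kEntries yields
// "key,value" pairs.
std::vector<std::string> GetOwnValuesOrEntries(
    const std::map<std::string, std::string>& object,
    CollectType collect_type) {
  std::vector<std::string> result;
  for (const auto& [key, value] : object) {
    if (collect_type == CollectType::kEntries) {
      result.push_back(key + "," + value);
    } else {
      result.push_back(value);
    }
  }
  return result;
}

int main() {
  std::map<std::string, std::string> object{{"a", "1"}, {"b", "2"}};
  for (const auto& entry : GetOwnValuesOrEntries(object, CollectType::kEntries))
    std::printf("[%s]\n", entry.c_str());
}
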
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+  // If the object has an enum cache, we use its length as the result
+  // array length, since it matches the length of the
+  // Object.(entries/values) result array and may be smaller than the
+  // NumberOfOwnDescriptorsBits value, so it wastes less memory.
+  // If there is no enum cache, we call into the runtime, which
+  // initializes the enum cache for subsequent calls into this CSA
+  // fast path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+    // Even with an enum cache, we cannot detect accessors on the
+    // object until we loop through its descriptors. If an accessor is
+    // found we must jump to the runtime call, which would leave part
+    // of the FixedArray uninitialized, so the array is pre-filled with
+    // the-hole even when the enum cache exists.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_number, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+
+ // We don't use BuildFastLoop here; instead we hand-write the loop,
+ // because we need 'continue'-style control flow (the jumps to
+ // loop_condition below).
+ BIND(&loop);
+ {
+ // We never invoke getters on this path, so the map of {object}
+ // cannot change.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
+ GotoIf(IsSymbol(next_key), &loop_condition);
+
+ TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
+ DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> kind = LoadPropertyKind(details);
+
+ // If the property is an accessor, we leave the fast path and call
+ // into the runtime.
+ GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
+ CSA_ASSERT(this, IsPropertyKindData(kind));
+
+ // If desc is not undefined and desc.[[Enumerable]] is true, then
+ GotoIfNot(IsPropertyEnumerable(details), &loop_condition);
+
+ VARIABLE(var_property_value, MachineRepresentation::kTagged,
+ UndefinedConstant());
+ Node* descriptor_name_index = DescriptorArrayToKeyIndex(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+
+ // Let value be ? Get(O, key).
+ LoadPropertyFromFastObject(object, map, descriptors,
+ descriptor_name_index, details,
+ &var_property_value);
+
+ // If kind is "value", append value to properties.
+ Node* value = var_property_value.value();
+
+ if (collect_type == CollectType::kEntries) {
+ // Let entry be CreateArrayFromList(« key, value »).
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
+ IntPtrConstant(2));
+ StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ value = array;
+ }
+
+ StoreFixedArrayElement(values_or_entries, var_result_index.value(),
+ value);
+ Increment(&var_result_index, 1);
+ Goto(&loop_condition);
+
+ BIND(&loop_condition);
+ {
+ Increment(&var_descriptor_number, 1);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+ }
+ }
+ BIND(&after_loop);
+ return FinalizeValuesOrEntriesJSArray(context, values_or_entries,
+ var_result_index.value(), array_map,
+ if_no_properties);
+ }
+}
+
+TNode<JSArray>
+ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
+ TNode<Map> array_map, Label* if_empty) {
+ CSA_ASSERT(this, IsJSArrayMap(array_map));
+
+ GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, SmiTag(size), nullptr);
+ StoreObjectField(array, JSArray::kElementsOffset, result);
+ return TNode<JSArray>::UncheckedCast(array);
+}
+
TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -105,7 +394,7 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
TNode<Object> method =
- CAST(GetProperty(context, receiver, factory()->toString_string()));
+ GetProperty(context, receiver, factory()->toString_string());
Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
BIND(&if_null_or_undefined);
@@ -266,6 +555,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kValues);
+}
+
+TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kEntries);
+}
+
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -304,7 +609,7 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
// Simulate the ToObject invocation on {receiver}.
- CallBuiltin(Builtins::kToObject, context, receiver);
+ ToObject(context, receiver);
Unreachable();
}
@@ -367,9 +672,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring);
BIND(&if_tagisnotstring);
{
- var_tag.Bind(
- CallStub(Builtins::CallableFor(isolate(), Builtins::kClassOf),
- context, receiver));
+ var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver));
Goto(&if_tagisstring);
}
BIND(&if_tagisstring);
@@ -574,9 +877,8 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&return_generic);
{
- Node* tag = GetProperty(
- context, CallBuiltin(Builtins::kToObject, context, receiver),
- LoadRoot(Heap::kto_string_tag_symbolRootIndex));
+ Node* tag = GetProperty(context, ToObject(context, receiver),
+ LoadRoot(Heap::kto_string_tag_symbolRootIndex));
GotoIf(TaggedIsSmi(tag), &return_default);
GotoIfNot(IsString(tag), &return_default);
ReturnToStringFormat(context, tag);
@@ -592,7 +894,7 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
- Return(CallBuiltin(Builtins::kToObject, context, receiver));
+ Return(ToObject(context, receiver));
}
// ES #sec-object.create
@@ -760,7 +1062,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
- Label runtime(this);
+ Label done(this), runtime(this);
GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
Node* maybe_map =
LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
@@ -790,7 +1092,13 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- Return(result);
+ GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ { Return(result); }
BIND(&runtime);
{
@@ -810,7 +1118,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
Node* key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- object = CallBuiltin(Builtins::kToObject, context, object);
+ object = ToObject(context, object);
// 2. Let key be ? ToPropertyKey(P).
key = ToName(context, key);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 36f7ebfc0a..4e353b9260 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
BUILTIN(ObjectGetOwnPropertyDescriptors) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1a3ebcd892..d3ea3f82e2 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -22,19 +22,26 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const initial_map =
+ Node* const promise_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const instance = AllocateJSObjectFromMap(initial_map);
- return instance;
+ Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
+ StoreMapNoWriteBarrier(promise, promise_map);
+ StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ return promise;
}
void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
STATIC_ASSERT(v8::Promise::kPending == 0);
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kReactionsOrResultOffset,
+ SmiConstant(Smi::kZero));
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset,
- SmiConstant(0));
+ SmiConstant(Smi::kZero));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(0));
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
}
}
@@ -58,9 +65,11 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
Node* context, v8::Promise::PromiseState status, Node* result) {
- Node* const instance = AllocateJSPromise(context);
+ DCHECK_NE(Promise::kPending, status);
- StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+ Node* const instance = AllocateJSPromise(context);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset,
+ result);
STATIC_ASSERT(JSPromise::kStatusShift == 0);
StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
SmiConstant(status));
@@ -86,66 +95,68 @@ PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
promise, debug_event, native_context);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve_info =
- LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* const resolve_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
Node* const resolve =
AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
- Node* const reject_info =
- LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+ Node* const reject_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX);
Node* const reject =
AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
return std::make_pair(resolve, reject);
}
-Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
- Node* constructor,
- Node* debug_event) {
- if (debug_event == nullptr) {
- debug_event = TrueConstant();
- }
+// ES #sec-newpromisecapability
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const constructor = Parameter(Descriptor::kConstructor);
+ Node* const debug_event = Parameter(Descriptor::kDebugEvent);
+ Node* const native_context = LoadNativeContext(context);
- Label if_not_constructor(this, Label::kDeferred);
+ Label if_not_constructor(this, Label::kDeferred),
+ if_notcallable(this, Label::kDeferred), if_fast_promise_capability(this),
+ if_slow_promise_capability(this, Label::kDeferred);
GotoIf(TaggedIsSmi(constructor), &if_not_constructor);
GotoIfNot(IsConstructorMap(LoadMap(constructor)), &if_not_constructor);
-
- Node* native_context = LoadNativeContext(context);
-
- Node* map = LoadRoot(Heap::kTuple3MapRootIndex);
- Node* capability = AllocateStruct(map);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(capability);
-
- Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
- out(this);
Branch(WordEqual(constructor,
LoadContextElement(native_context,
Context::PROMISE_FUNCTION_INDEX)),
- &if_builtin_promise, &if_custom_promise);
+ &if_fast_promise_capability, &if_slow_promise_capability);
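+ // The fast path covers the common case where {constructor} is the
+ // unmodified %Promise% function itself; Promise subclasses, e.g.
+ // class MyPromise extends Promise {}, take the slow path through the
+ // GetCapabilitiesExecutor machinery below.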
- BIND(&if_builtin_promise);
+ BIND(&if_fast_promise_capability);
{
- Node* promise = AllocateJSPromise(context);
- PromiseInit(promise);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
+ Node* promise =
+ AllocateAndInitJSPromise(native_context, UndefinedConstant());
Node* resolve = nullptr;
Node* reject = nullptr;
-
std::tie(resolve, reject) =
CreatePromiseResolvingFunctions(promise, debug_event, native_context);
- StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
- StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
- CallRuntime(Runtime::kPromiseHookInit, context, promise,
- UndefinedConstant());
- Goto(&out);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kPromiseOffset, promise);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kResolveOffset, resolve);
+ StoreObjectFieldNoWriteBarrier(capability, PromiseCapability::kRejectOffset,
+ reject);
+ Return(capability);
}
- BIND(&if_custom_promise);
+ BIND(&if_slow_promise_capability);
{
- Label if_notcallable(this, Label::kDeferred);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kResolveOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
+ Heap::kUndefinedValueRootIndex);
+
Node* executor_context =
CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
Node* executor_info = LoadContextElement(
@@ -155,8 +166,9 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* executor = AllocateFunctionWithMapAndContext(
function_map, executor_info, executor_context);
- Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
- constructor, executor);
+ Node* promise = ConstructJS(CodeFactory::Construct(isolate()),
+ native_context, constructor, executor);
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Node* resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
@@ -167,26 +179,14 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
GotoIfNot(IsCallable(reject), &if_notcallable);
-
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
-
- Goto(&out);
-
- BIND(&if_notcallable);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kResolveOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kRejectOffset,
- UndefinedConstant());
- ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
+ Return(capability);
}
BIND(&if_not_constructor);
ThrowTypeError(context, MessageTemplate::kNotConstructor, constructor);
- BIND(&out);
- return var_result.value();
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
@@ -236,7 +236,7 @@ Node* PromiseBuiltinsAssembler::IsPromiseStatus(
Node* PromiseBuiltinsAssembler::PromiseStatus(Node* promise) {
STATIC_ASSERT(JSPromise::kStatusShift == 0);
Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
- return Word32And(SmiToWord32(flags), Int32Constant(JSPromise::kStatusMask));
+ return Word32And(SmiToInt32(flags), Int32Constant(JSPromise::kStatusMask));
}
void PromiseBuiltinsAssembler::PromiseSetStatus(
@@ -258,579 +258,299 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
}
-Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor) {
- Isolate* isolate = this->isolate();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(default_constructor);
-
- // 2. Let C be ? Get(O, "constructor").
- Node* const constructor =
- GetProperty(context, object, isolate->factory()->constructor_string());
-
- // 3. If C is undefined, return defaultConstructor.
- Label out(this);
- GotoIf(IsUndefined(constructor), &out);
-
- // 4. If Type(C) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, constructor,
- MessageTemplate::kConstructorNotReceiver);
-
- // 5. Let S be ? Get(C, @@species).
- Node* const species =
- GetProperty(context, constructor, isolate->factory()->species_symbol());
-
- // 6. If S is either undefined or null, return defaultConstructor.
- GotoIf(IsNullOrUndefined(species), &out);
-
- // 7. If IsConstructor(S) is true, return S.
- Label throw_error(this);
- GotoIf(TaggedIsSmi(species), &throw_error);
- GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
- var_result.Bind(species);
- Goto(&out);
-
- // 8. Throw a TypeError exception.
- BIND(&throw_error);
- ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
-
- BIND(&out);
- return var_result.value();
-}
-
-void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
- Node* value) {
- Node* elements = LoadObjectField(promise, offset);
- Node* length = LoadFixedArrayBaseLength(elements);
- CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
- length = TaggedToParameter(length, mode);
-
- Node* delta = IntPtrOrSmiConstant(1, mode);
- Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
-
- const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- int additional_offset = 0;
-
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kFixedArrays;
- Node* new_elements =
- ExtractFixedArray(elements, nullptr, length, new_capacity, flags, mode);
-
- StoreFixedArrayElement(new_elements, length, value, barrier_mode,
- additional_offset, mode);
-
- StoreObjectField(promise, offset, new_elements);
-}
-
-Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
- Node* promise,
- Node* on_resolve,
- Node* on_reject) {
- Isolate* isolate = this->isolate();
-
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
- "Promise.prototype.then");
-
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-
- // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- Node* constructor = SpeciesConstructor(context, promise, promise_fun);
-
- // 4. Let resultCapability be ? NewPromiseCapability(C).
- Callable call_callable = CodeFactory::Call(isolate);
- Label fast_promise_capability(this), promise_capability(this),
- perform_promise_then(this);
- VARIABLE(var_deferred_promise, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
-
- Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
- &promise_capability);
-
- BIND(&fast_promise_capability);
- {
- Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
- var_deferred_promise.Bind(deferred_promise);
- var_deferred_on_resolve.Bind(UndefinedConstant());
- var_deferred_on_reject.Bind(UndefinedConstant());
- Goto(&perform_promise_then);
- }
-
- BIND(&promise_capability);
- {
- Node* const capability = NewPromiseCapability(context, constructor);
- var_deferred_promise.Bind(
- LoadObjectField(capability, PromiseCapability::kPromiseOffset));
- var_deferred_on_resolve.Bind(
- LoadObjectField(capability, PromiseCapability::kResolveOffset));
- var_deferred_on_reject.Bind(
- LoadObjectField(capability, PromiseCapability::kRejectOffset));
- Goto(&perform_promise_then);
- }
-
- // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
- // resultCapability).
- BIND(&perform_promise_then);
- Node* const result = InternalPerformPromiseThen(
- context, promise, on_resolve, on_reject, var_deferred_promise.value(),
- var_deferred_on_resolve.value(), var_deferred_on_reject.value());
- return result;
-}
-
-Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
- Node* context, Node* promise, Node* on_resolve, Node* on_reject,
- Node* deferred_promise, Node* deferred_on_resolve,
- Node* deferred_on_reject) {
- VARIABLE(var_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_on_reject, MachineRepresentation::kTagged);
-
- var_on_resolve.Bind(on_resolve);
- var_on_reject.Bind(on_reject);
-
- Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
- append_callbacks(this);
- GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
-
- Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable);
-
- BIND(&if_onresolvenotcallable);
- {
- var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant());
- Goto(&onrejectcheck);
- }
-
- BIND(&onrejectcheck);
+// ES #sec-performpromisethen
+void PromiseBuiltinsAssembler::PerformPromiseThen(
+ Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected,
+ Node* result_promise_or_capability) {
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+ CSA_ASSERT(this,
+ Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled)));
+ CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected)));
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability));
+ CSA_ASSERT(this, Word32Or(IsJSPromise(result_promise_or_capability),
+ IsPromiseCapability(result_promise_or_capability)));
+
+ Label if_pending(this), if_notpending(this), done(this);
+ Node* const status = PromiseStatus(promise);
+ Branch(IsPromiseStatus(status, v8::Promise::kPending), &if_pending,
+ &if_notpending);
+
+ BIND(&if_pending);
{
- Label if_onrejectnotcallable(this);
- GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
-
- Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable);
-
- BIND(&if_onrejectnotcallable);
- {
- var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant());
- Goto(&append_callbacks);
- }
+ // The {promise} is still in "Pending" state, so we just record a new
+ // PromiseReaction holding both the onFulfilled and onRejected callbacks.
+ // Once the {promise} is resolved we decide on the concrete handler to
+ // push onto the microtask queue.
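+ // For example, given
+ //   const p = new Promise(() => {});
+ //   p.then(f1, r1);
+ //   p.then(f2, r2);
+ // the reaction for (f2, r2) ends up at the head of the list, i.e. the
+ // reactions are linked in reverse call order and are reversed again in
+ // TriggerPromiseReactions before being scheduled.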
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const reaction =
+ AllocatePromiseReaction(promise_reactions, result_promise_or_capability,
+ on_fulfilled, on_rejected);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ Goto(&done);
}
- BIND(&append_callbacks);
+ BIND(&if_notpending);
{
- Label fulfilled_check(this);
- Node* const status = PromiseStatus(promise);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kPending), &fulfilled_check);
-
- Node* const existing_deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- Label if_noexistingcallbacks(this), if_existingcallbacks(this);
- Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
- &if_existingcallbacks);
-
- BIND(&if_noexistingcallbacks);
+ VARIABLE(var_map, MachineRepresentation::kTagged);
+ VARIABLE(var_handler, MachineRepresentation::kTagged);
+ Label if_fulfilled(this), if_rejected(this, Label::kDeferred),
+ enqueue(this);
+ Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled,
+ &if_rejected);
+
+ BIND(&if_fulfilled);
{
- // Store callbacks directly in the slots.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- var_on_resolve.value());
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- var_on_reject.value());
- Goto(&out);
+ var_map.Bind(LoadRoot(Heap::kPromiseFulfillReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_fulfilled);
+ Goto(&enqueue);
}
- BIND(&if_existingcallbacks);
+ BIND(&if_rejected);
{
- Label if_singlecallback(this), if_multiplecallbacks(this);
- BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
- &if_multiplecallbacks);
-
- BIND(&if_singlecallback);
- {
- // Create new FixedArrays to store callbacks, and migrate
- // existing callbacks.
- Node* const deferred_promise_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(deferred_promise_arr, 0,
- existing_deferred_promise);
- StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
-
- Node* const deferred_on_resolve_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_resolve_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
- StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
-
- Node* const deferred_on_reject_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_reject_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
- StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
-
- Node* const fulfill_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- fulfill_reactions, 0,
- LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
- StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
-
- Node* const reject_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- reject_reactions, 0,
- LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
- StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
-
- // Store new FixedArrays in promise.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject_arr);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- fulfill_reactions);
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- reject_reactions);
- Goto(&out);
- }
-
- BIND(&if_multiplecallbacks);
- {
- AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
- deferred_promise);
- AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
- deferred_on_resolve);
- AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
- deferred_on_reject);
- AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
- var_on_resolve.value());
- AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
- var_on_reject.value());
- Goto(&out);
- }
+ CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
+ var_map.Bind(LoadRoot(Heap::kPromiseRejectReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_rejected);
+ GotoIf(PromiseHasHandler(promise), &enqueue);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+ Goto(&enqueue);
}
- BIND(&fulfilled_check);
- {
- Label reject(this);
- Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kFulfilled), &reject);
-
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
- deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
-
- BIND(&reject);
- {
- CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
- Node* const has_handler = PromiseHasHandler(promise);
- Label enqueue(this);
-
- // TODO(gsathya): Fold these runtime calls and move to TF.
- GotoIf(has_handler, &enqueue);
- CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
- Goto(&enqueue);
-
- BIND(&enqueue);
- {
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_reject.value(), deferred_promise,
- deferred_on_resolve, deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
- }
- }
+ BIND(&enqueue);
+ Node* argument =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* microtask = AllocatePromiseReactionJobTask(
+ var_map.value(), context, argument, var_handler.value(),
+ result_promise_or_capability);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), microtask);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
PromiseSetHasHandler(promise);
- return deferred_promise;
}
-// Promise fast path implementations rely on unmodified JSPromise instances.
-// We use a fairly coarse granularity for this and simply check whether both
-// the promise itself is unmodified (i.e. its map has not changed) and its
-// prototype is unmodified.
-// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
- if_ismodified);
-}
-
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
- Node* promise_fun,
- Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- CSA_ASSERT(this, IsNativeContext(native_context));
- CSA_ASSERT(this,
- WordEqual(promise_fun,
- LoadContextElement(native_context,
- Context::PROMISE_FUNCTION_INDEX)));
-
- GotoIfForceSlowPath(if_ismodified);
-
- Node* const map = LoadMap(promise);
- Node* const initial_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = WordEqual(map, initial_map);
-
- GotoIfNot(has_initialmap, if_ismodified);
+// ES #sec-performpromisethen
+TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
+ Node* const result_promise = Parameter(Descriptor::kResultPromise);
- Node* const initial_proto_initial_map =
- LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = LoadMap(CAST(LoadMapPrototype(map)));
- Node* const proto_has_initialmap =
- WordEqual(proto_map, initial_proto_initial_map);
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise));
+ CSA_ASSERT(this, IsJSPromise(result_promise));
- Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ PerformPromiseThen(context, promise, on_fulfilled, on_rejected,
+ result_promise);
+ Return(result_promise);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
- Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
- Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
- StoreMapNoWriteBarrier(info,
- Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(Node* next,
+ Node* payload,
+ Node* fulfill_handler,
+ Node* reject_handler) {
+ Node* const reaction = Allocate(PromiseReaction::kSize);
+ StoreMapNoWriteBarrier(reaction, Heap::kPromiseReactionMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kPayloadOffset,
+ payload);
+ StoreObjectFieldNoWriteBarrier(
+ reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+ reaction, PromiseReaction::kRejectHandlerOffset, reject_handler);
+ return reaction;
+}
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Node* map, Node* context, Node* argument, Node* handler, Node* payload) {
+ Node* const microtask = Allocate(PromiseReactionJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask, map);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenOffset, then);
+ microtask, PromiseReactionJobTask::kArgumentOffset, argument);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+ microtask, PromiseReactionJobTask::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+ microtask, PromiseReactionJobTask::kHandlerOffset, handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kContextOffset, context);
- return info;
+ microtask, PromiseReactionJobTask::kPayloadOffset, payload);
+ return microtask;
}
-void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
- Node* promise,
- Node* result) {
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
- VARIABLE(var_then, MachineRepresentation::kTagged);
-
- Label do_enqueue(this), fulfill(this), if_nocycle(this),
- if_cycle(this, Label::kDeferred),
- if_rejectpromise(this, Label::kDeferred), out(this);
-
- Label cycle_check(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &cycle_check);
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&cycle_check);
-
- BIND(&cycle_check);
- // 6. If SameValue(resolution, promise) is true, then
- BranchIfSameValue(promise, result, &if_cycle, &if_nocycle);
- BIND(&if_nocycle);
-
- // 7. If Type(resolution) is not Object, then
- GotoIf(TaggedIsSmi(result), &fulfill);
- GotoIfNot(IsJSReceiver(result), &fulfill);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Heap::RootListIndex map_root_index, Node* context, Node* argument,
+ Node* handler, Node* payload) {
+ DCHECK(map_root_index == Heap::kPromiseFulfillReactionJobTaskMapRootIndex ||
+ map_root_index == Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ Node* const map = LoadRoot(map_root_index);
+ return AllocatePromiseReactionJobTask(map, context, argument, handler,
+ payload);
+}
- Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
- &if_notnativepromise);
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
+ Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
+ Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask,
+ Heap::kPromiseResolveThenableJobTaskMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kContextOffset, context);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset,
+ promise_to_resolve);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenOffset, then);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable);
+ return microtask;
+}
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
- BIND(&if_nativepromise);
+// ES #sec-triggerpromisereactions
+Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
+ Node* context, Node* reactions, Node* argument,
+ PromiseReaction::Type type) {
+ // We need to reverse the {reactions} here, since we record them on the
+ // JSPromise in the reverse order.
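+ // This is a plain in-place reversal of the singly-linked list formed
+ // by the PromiseReaction::kNextOffset fields; the list terminator is
+ // Smi zero (the initial value of the reactions-or-result slot), not
+ // undefined.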
{
- Node* const thenable_status = PromiseStatus(result);
- Node* const thenable_value =
- LoadObjectField(result, JSPromise::kResultOffset);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
+ VARIABLE(var_reversed, MachineRepresentation::kTagged,
+ SmiConstant(Smi::kZero));
- Label if_isnotpending(this);
- GotoIfNot(IsPromiseStatus(thenable_status, v8::Promise::kPending),
- &if_isnotpending);
-
- // TODO(gsathya): Use a marker here instead of the actual then
- // callback, and check for the marker in PromiseResolveThenableJob
- // and perform PromiseThen.
- Node* const then =
- LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
- var_then.Bind(then);
- Goto(&do_enqueue);
-
- BIND(&if_isnotpending);
+ Label loop(this, {&var_current, &var_reversed}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
{
- Label if_fulfilled(this), if_rejected(this);
- Branch(IsPromiseStatus(thenable_status, v8::Promise::kFulfilled),
- &if_fulfilled, &if_rejected);
-
- BIND(&if_fulfilled);
- {
- PromiseFulfill(context, promise, thenable_value,
- v8::Promise::kFulfilled);
- PromiseSetHasHandler(promise);
- Goto(&out);
- }
-
- BIND(&if_rejected);
- {
- Label reject(this);
- Node* const has_handler = PromiseHasHandler(result);
-
- // Promise has already been rejected, but had no handler.
- // Revoke previously triggered reject event.
- GotoIf(has_handler, &reject);
- CallRuntime(Runtime::kPromiseRevokeReject, context, result);
- Goto(&reject);
-
- BIND(&reject);
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, thenable_value, false);
- PromiseSetHasHandler(result);
- Goto(&out);
- }
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+ StoreObjectField(current, PromiseReaction::kNextOffset,
+ var_reversed.value());
+ var_reversed.Bind(current);
+ Goto(&loop);
}
+ BIND(&done_loop);
+ reactions = var_reversed.value();
}
- BIND(&if_notnativepromise);
+ // Morph the {reactions} into PromiseReactionJobTasks and push them
+ // onto the microtask queue.
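+ // In the fulfill case the handler and payload slots of PromiseReaction
+ // and PromiseReactionJobTask already coincide (see the STATIC_ASSERTs
+ // below), so only the map, argument and context are written; in the
+ // reject case the reject handler sits at a different offset and has to
+ // be copied into the handler slot explicitly.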
{
- // 8. Let then be Get(resolution, "then").
- Node* const then =
- GetProperty(context, result, isolate->factory()->then_string());
-
- // 9. If then is an abrupt completion, then
- GotoIfException(then, &if_rejectpromise, &var_reason);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
- // 11. If IsCallable(thenAction) is false, then
- GotoIf(TaggedIsSmi(then), &fulfill);
- Node* const then_map = LoadMap(then);
- GotoIfNot(IsCallableMap(then_map), &fulfill);
- var_then.Bind(then);
- Goto(&do_enqueue);
+ Label loop(this, {&var_current}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+
+ // Morph {current} from a PromiseReaction into a PromiseReactionJobTask
+ // and schedule that on the microtask queue. We try to minimize the number
+ // of stores here to avoid screwing up the store buffer.
+ STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ if (type == PromiseReaction::kFulfill) {
+ StoreMapNoWriteBarrier(
+ current, Heap::kPromiseFulfillReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
+ PromiseReactionJobTask::kHandlerOffset);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ } else {
+ Node* handler =
+ LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
+ StoreMapNoWriteBarrier(current,
+ Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ StoreObjectField(current, PromiseReactionJobTask::kHandlerOffset,
+ handler);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ }
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), current);
+ Goto(&loop);
+ }
+ BIND(&done_loop);
}
- BIND(&do_enqueue);
- {
- // TODO(gsathya): Add fast path for native promises with unmodified
- // PromiseThen (which don't need these resolving functions, but
- // instead can just call resolve/reject directly).
- Node* resolve = nullptr;
- Node* reject = nullptr;
- std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
- promise, FalseConstant(), native_context);
-
- Node* const info = AllocatePromiseResolveThenableJobInfo(
- result, var_then.value(), resolve, reject, context);
-
- Label enqueue(this);
- GotoIfNot(IsDebugActive(), &enqueue);
-
- GotoIf(TaggedIsSmi(result), &enqueue);
- GotoIfNot(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
-
- // Mark the dependency of the new promise on the resolution
- Node* const key =
- HeapConstant(isolate->factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, result, key, promise,
- SmiConstant(LanguageMode::kStrict));
- Goto(&enqueue);
-
- // 12. Perform EnqueueJob("PromiseJobs",
- // PromiseResolveThenableJob, « promise, resolution, thenAction»).
- BIND(&enqueue);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
+ return UndefinedConstant();
+}
- // 7.b Return FulfillPromise(promise, resolution).
- BIND(&fulfill);
- {
- PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
- Goto(&out);
- }
+template <typename... TArgs>
+Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
+ TArgs... args) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_cycle);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
+ GotoIf(TaggedIsSmi(receiver), &if_slow);
+ Node* const receiver_map = LoadMap(receiver);
+ // We can skip the "then" lookup on {receiver} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
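+ // On the fast path we invoke the builtin Promise.prototype.then
+ // directly; any monkey-patching, e.g.
+ //   Promise.prototype.then = function (...args) { /* ... */ };
+ // invalidates the protector and forces the generic property lookup in
+ // the slow path below.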
+ BranchIfPromiseThenLookupChainIntact(native_context, receiver_map, &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
{
- // 6.a Let selfResolutionError be a newly created TypeError object.
- Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
- Node* const error =
- CallRuntime(Runtime::kNewTypeError, context, message_id, result);
- var_reason.Bind(error);
-
- // 6.b Return RejectPromise(promise, selfResolutionError).
- Goto(&if_rejectpromise);
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ Node* const result =
+ CallJS(CodeFactory::CallFunction(
+ isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- // 9.a Return RejectPromise(promise, then.[[Value]]).
- BIND(&if_rejectpromise);
+ BIND(&if_slow);
{
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, var_reason.value(), false);
- Goto(&out);
+ Node* const then = GetProperty(native_context, receiver,
+ isolate()->factory()->then_string());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
+ return var_result.value();
}
-void PromiseBuiltinsAssembler::PromiseFulfill(
- Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status) {
- Label do_promisereset(this);
-
- Node* const deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- GotoIf(IsUndefined(deferred_promise), &do_promisereset);
-
- Node* const tasks =
- status == v8::Promise::kFulfilled
- ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
- : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
-
- Node* const deferred_on_resolve =
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
- Node* const deferred_on_reject =
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
+ Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this, IsJSPromiseMap(promise_map));
- Node* const info = AllocatePromiseReactionJobInfo(
- result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
- context);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
+ if_slow);
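+ // The species protector is invalidated, e.g., by redefining
+ // Promise.prototype.constructor or installing a Symbol.species getter,
+ // after which Promise.prototype.then has to perform the full
+ // SpeciesConstructor lookup.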
+ Branch(IsSpeciesProtectorCellInvalid(), if_slow, if_fast);
+}
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&do_promisereset);
+void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
+ Node* native_context, Node* receiver_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsMap(receiver_map));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&do_promisereset);
- {
- PromiseSetStatus(promise, status);
- StoreObjectField(promise, JSPromise::kResultOffset, result);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- }
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(IsJSPromiseMap(receiver_map), if_slow);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(receiver_map), promise_prototype),
+ if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), if_slow, if_fast);
}
void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
@@ -878,43 +598,6 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
BIND(&has_access);
}
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- Node* debug_event) {
- Label out(this);
- GotoIfNot(IsDebugActive(), &out);
- GotoIfNot(WordEqual(TrueConstant(), debug_event), &out);
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- Goto(&out);
-
- BIND(&out);
- InternalPromiseReject(context, promise, value, false);
-}
-
-// This duplicates a lot of logic from PromiseRejectEvent in
-// runtime-promise.cc
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- bool debug_event) {
- Label fulfill(this), exit(this);
-
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &fulfill);
- if (debug_event) {
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- }
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&fulfill);
-
- BIND(&fulfill);
- PromiseFulfill(context, promise, value, v8::Promise::kRejected);
-
- GotoIf(PromiseHasHandler(promise), &exit);
- CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
- Goto(&exit);
-
- BIND(&exit);
-}
-
void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
Node* context, Node* condition, const NodeGenerator& object) {
Label done(this);
@@ -940,40 +623,52 @@ void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
BIND(&done);
}
-void PromiseBuiltinsAssembler::PerformFulfillClosure(Node* context, Node* value,
- bool should_resolve) {
- Label out(this);
+// ES #sec-promise-reject-functions
+TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
// 2. Let promise be F.[[Promise]].
- Node* const promise_slot = IntPtrConstant(kPromiseSlot);
- Node* const promise = LoadContextElement(context, promise_slot);
-
- // We use `undefined` as a marker to know that this callback was
- // already called.
- GotoIf(IsUndefined(promise), &out);
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
- if (should_resolve) {
- InternalResolvePromise(context, promise, value);
- } else {
- Node* const debug_event =
- LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
- InternalPromiseReject(context, promise, value, debug_event);
- }
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
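+ // This makes repeated calls a no-op, e.g. in
+ //   new Promise((resolve, reject) => { reject(1); reject(2); });
+ // only the first reject(1) has any effect.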
- StoreContextElement(context, promise_slot, UndefinedConstant());
- Goto(&out);
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
- BIND(&out);
+ // 6. Return RejectPromise(promise, reason).
+ Node* const debug_event = LoadContextElement(context, kDebugEventSlot);
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ debug_event));
}
-// ES#sec-promise-reject-functions
-// Promise Reject Functions
-TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- PerformFulfillClosure(context, value, false);
- Return(UndefinedConstant());
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
+
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
+
+ // The rest of the logic (and the catch prediction) is
+ // encapsulated in the dedicated ResolvePromise builtin.
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
+
+TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
+ Node* promise = Parameter(Descriptor::kPromise);
+ Return(promise);
}
// ES6 #sec-promise-executor
@@ -1089,231 +784,357 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
}
}
+// V8 Extras: v8.createPromise(parent)
TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
Node* const parent = Parameter(Descriptor::kParent);
Node* const context = Parameter(Descriptor::kContext);
Return(AllocateAndInitJSPromise(context, parent));
}
+// V8 Extras: v8.rejectPromise(promise, reason)
+TF_BUILTIN(PromiseInternalReject, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
+ // We pass true to trigger the debugger's on-exception handler.
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ TrueConstant()));
+}
+
+// V8 Extras: v8.resolvePromise(promise, resolution)
+TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
+ Node* const context = Parameter(Descriptor::kContext);
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
+
// ES#sec-promise.prototype.then
// Promise.prototype.then ( onFulfilled, onRejected )
TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = Parameter(Descriptor::kOnFullfilled);
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
-}
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.then");
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred),
+ slow_promise_capability(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const promise_map = LoadMap(promise);
+ BranchIfPromiseSpeciesLookupChainIntact(
+ native_context, promise_map, &fast_promise_capability, &slow_constructor);
- PerformFulfillClosure(context, value, true);
- Return(UndefinedConstant());
-}
+ BIND(&slow_constructor);
+ Node* const constructor =
+ SpeciesConstructor(native_context, promise, promise_fun);
+ Branch(WordEqual(constructor, promise_fun), &fast_promise_capability,
+ &slow_promise_capability);
-// ES #sec-fulfillpromise
-TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const result = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ Label perform_promise_then(this);
+ VARIABLE(var_result_promise, MachineRepresentation::kTagged);
+ VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged);
- InternalResolvePromise(context, promise, result);
- Return(UndefinedConstant());
+ BIND(&fast_promise_capability);
+ {
+ Node* const result_promise = AllocateAndInitJSPromise(context, promise);
+ var_result_promise_or_capability.Bind(result_promise);
+ var_result_promise.Bind(result_promise);
+ Goto(&perform_promise_then);
+ }
+
+ BIND(&slow_promise_capability);
+ {
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
+ var_result_promise.Bind(
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset));
+ var_result_promise_or_capability.Bind(capability);
+ Goto(&perform_promise_then);
+ }
+
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ BIND(&perform_promise_then);
+ {
+ // We do some work of the PerformPromiseThen operation here, in that
+ // we check the handlers and turn non-callable handlers into undefined.
+    // This is necessary because this is the one and only callsite of
+    // PerformPromiseThen that has to perform that coercion.
+
+ // 3. If IsCallable(onFulfilled) is false, then
+ // a. Set onFulfilled to undefined.
+ VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled);
+ Label if_fulfilled_done(this), if_fulfilled_notcallable(this);
+ GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable);
+ Branch(IsCallable(on_fulfilled), &if_fulfilled_done,
+ &if_fulfilled_notcallable);
+ BIND(&if_fulfilled_notcallable);
+ var_on_fulfilled.Bind(UndefinedConstant());
+ Goto(&if_fulfilled_done);
+ BIND(&if_fulfilled_done);
+
+ // 4. If IsCallable(onRejected) is false, then
+ // a. Set onRejected to undefined.
+ VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected);
+ Label if_rejected_done(this), if_rejected_notcallable(this);
+ GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable);
+ Branch(IsCallable(on_rejected), &if_rejected_done,
+ &if_rejected_notcallable);
+ BIND(&if_rejected_notcallable);
+ var_on_rejected.Bind(UndefinedConstant());
+ Goto(&if_rejected_done);
+ BIND(&if_rejected_done);
+
+ PerformPromiseThen(context, promise, var_on_fulfilled.value(),
+ var_on_rejected.value(),
+ var_result_promise_or_capability.value());
+ Return(var_result_promise.value());
+ }
}
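+
+// The handler coercion above is observable: non-callable arguments to
+// then() are replaced by undefined rather than causing an exception. A
+// small JS illustration (the undefined handler later acts as identity /
+// rethrow in PromiseReactionJob):
+//
+//   Promise.resolve(1).then(42, "nope").then(v => console.log(v));  // 1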
-TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const on_reject = Parameter(Descriptor::kOnReject);
- Node* const exception = Parameter(Descriptor::kException);
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const on_fulfilled = UndefinedConstant();
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- VARIABLE(var_unused, MachineRepresentation::kTagged);
+ // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
+ Node* const native_context = LoadNativeContext(context);
+ Return(InvokeThen(native_context, receiver, on_fulfilled, on_rejected));
+}
- Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+// ES #sec-promiseresolvethenablejob
+TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
+ Node* const native_context = Parameter(Descriptor::kContext);
+ Node* const promise_to_resolve = Parameter(Descriptor::kPromiseToResolve);
+ Node* const thenable = Parameter(Descriptor::kThenable);
+ Node* const then = Parameter(Descriptor::kThen);
+
+ CSA_ASSERT(this, TaggedIsNotSmi(thenable));
+ CSA_ASSERT(this, IsJSReceiver(thenable));
+ CSA_ASSERT(this, IsJSPromise(promise_to_resolve));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_internalhandler);
+ // We can use a simple optimization here if we know that {then} is the initial
+ // Promise.prototype.then method, and {thenable} is a JSPromise whose
+ // @@species lookup chain is intact: We can connect {thenable} and
+ // {promise_to_resolve} directly in that case and avoid the allocation of a
+ // temporary JSPromise and the closures plus context.
+ //
+  // We take the generic (slow) path if a PromiseHook is enabled or the
+  // debugger is active, to make sure we expose spec-compliant behavior.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const promise_then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ GotoIfNot(WordEqual(then, promise_then), &if_slow);
+ Node* const thenable_map = LoadMap(thenable);
+ GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_slow);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, thenable_map,
+ &if_fast, &if_slow);
+
+ BIND(&if_fast);
{
- InternalPromiseReject(context, promise, exception, false);
- Return(UndefinedConstant());
+ // We know that the {thenable} is a JSPromise, which doesn't require
+ // any special treatment and that {then} corresponds to the initial
+ // Promise.prototype.then method. So instead of allocating a temporary
+ // JSPromise to connect the {thenable} with the {promise_to_resolve},
+ // we can directly schedule the {promise_to_resolve} with default
+    // handlers onto the {thenable} promise. This not only saves the
+    // JSPromise allocation, but also avoids allocating the two
+    // resolving closures and their shared context.
+ //
+ // What happens normally in this case is
+ //
+ // resolve, reject = CreateResolvingFunctions(promise_to_resolve)
+ // result_capability = NewPromiseCapability(%Promise%)
+ // PerformPromiseThen(thenable, resolve, reject, result_capability)
+ //
+ // which means that PerformPromiseThen will either schedule a new
+ // PromiseReaction with resolve and reject or a PromiseReactionJob
+ // with resolve or reject based on the state of {thenable}. And
+ // resolve or reject will just invoke the default [[Resolve]] or
+ // [[Reject]] functions on the {promise_to_resolve}.
+ //
+ // This is the same as just doing
+ //
+ // PerformPromiseThen(thenable, undefined, undefined, promise_to_resolve)
+ //
+ // which performs exactly the same (observable) steps.
+ TailCallBuiltin(Builtins::kPerformPromiseThen, native_context, thenable,
+ UndefinedConstant(), UndefinedConstant(),
+ promise_to_resolve);
}
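+
+  // A JS sketch of the observable effect of this fast path (the handler
+  // still runs only after the usual number of microtask ticks):
+  //
+  //   const inner = Promise.resolve(42);
+  //   const outer = new Promise(resolve => resolve(inner));
+  //   outer.then(v => console.log(v));  // 42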
- BIND(&if_customhandler);
+ BIND(&if_slow);
{
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ promise_to_resolve, FalseConstant(), native_context);
+
+ Label if_exception(this, Label::kDeferred);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
- Label if_exception(this);
- Node* const ret = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, on_reject, UndefinedConstant(), exception);
- GotoIfException(ret, &if_exception, &var_exception);
- Return(UndefinedConstant());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, thenable, resolve, reject);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
+
BIND(&if_exception);
- CallRuntime(Runtime::kReportMessage, context, var_exception.value());
- Return(UndefinedConstant());
+ {
+ // We need to reject the {thenable}.
+      Node* const result = CallJS(
+          CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+          native_context, reject, UndefinedConstant(), var_exception.value());
+ Return(result);
+ }
}
}
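+
+// A JS sketch of a plain thenable taking the slow path above, where {then}
+// is invoked with the two resolving functions created by
+// CreatePromiseResolvingFunctions:
+//
+//   const thenable = { then(resolve, reject) { resolve('hi'); } };
+//   Promise.resolve(thenable).then(v => console.log(v));  // 'hi'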
-TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
+// ES #sec-promisereactionjob
+void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
+ Node* handler, Node* payload,
+ PromiseReaction::Type type) {
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ CSA_ASSERT(this, Word32Or(IsCallable(handler),
+ Word32Or(IsCode(handler), IsUndefined(handler))));
+ CSA_ASSERT(this, TaggedIsNotSmi(payload));
- Node* const is_debug_active = IsDebugActive();
- Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
- promisehook_after(this), debug_pop(this);
+ VARIABLE(var_handler_result, MachineRepresentation::kTagged, argument);
+ Label if_handler_callable(this), if_fulfill(this), if_reject(this),
+ if_code_handler(this);
- GotoIfNot(is_debug_active, &promisehook_before);
- CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
- Goto(&promisehook_before);
+ GotoIf(IsUndefined(handler),
+ type == PromiseReaction::kFulfill ? &if_fulfill : &if_reject);
+ Branch(IsCode(handler), &if_code_handler, &if_handler_callable);
- BIND(&promisehook_before);
+ BIND(&if_code_handler);
{
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &run_handler);
- CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
- Goto(&run_handler);
+ // The {handler} is a Code object that knows how to deal with
+ // the {payload} and the {argument}.
+ PromiseReactionHandlerDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, context, argument, payload);
}
- BIND(&run_handler);
+ BIND(&if_handler_callable);
{
- Label if_defaulthandler(this), if_callablehandler(this),
- if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, handler, UndefinedConstant(), argument);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ var_handler_result.Bind(result);
+ Goto(&if_fulfill);
+ }
- Branch(IsSymbol(handler), &if_defaulthandler, &if_callablehandler);
+ BIND(&if_fulfill);
+ {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const value = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_defaulthandler);
+ BIND(&if_promise);
{
- Label if_resolve(this), if_reject(this);
- Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve,
- &if_reject);
-
- BIND(&if_resolve);
- {
- var_result.Bind(value);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
- }
-
- BIND(&if_reject);
- {
- var_reason.Bind(value);
- Goto(&if_rejectpromise);
- }
+ // For fast native promises we can skip the indirection
+ // via the promiseCapability.[[Resolve]] function and
+ // run the resolve logic directly from here.
+ TailCallBuiltin(Builtins::kResolvePromise, context, payload, value);
}
- BIND(&if_callablehandler);
+ BIND(&if_promise_capability);
{
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Resolve]] function.
+ Node* const resolve =
+ LoadObjectField(payload, PromiseCapability::kResolveOffset);
Node* const result = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, handler, UndefinedConstant(), value);
- var_result.Bind(result);
- GotoIfException(result, &if_rejectpromise, &var_reason);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ Return(result);
}
+ }
- BIND(&if_internalhandler);
- InternalResolvePromise(context, deferred_promise, var_result.value());
- Goto(&promisehook_after);
+ BIND(&if_reject);
+ if (type == PromiseReaction::kReject) {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const reason = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_customhandler);
+ BIND(&if_promise);
{
- Node* const maybe_exception = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, deferred_on_resolve, UndefinedConstant(),
- var_result.value());
- GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
- Goto(&promisehook_after);
+      // For fast native promises we can skip the indirection
+      // via the promiseCapability.[[Reject]] function and
+      // run the reject logic directly from here.
+ TailCallBuiltin(Builtins::kRejectPromise, context, payload, reason,
+ FalseConstant());
}
- }
-
- BIND(&if_rejectpromise);
- {
- CallBuiltin(Builtins::kPromiseHandleReject, context, deferred_promise,
- deferred_on_reject, var_reason.value());
- Goto(&promisehook_after);
- }
-
- BIND(&promisehook_after);
- {
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_pop);
- CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
- Goto(&debug_pop);
- }
-
- BIND(&debug_pop);
- {
- Label out(this);
- GotoIfNot(is_debug_active, &out);
- CallRuntime(Runtime::kDebugPopPromise, context);
- Goto(&out);
+ BIND(&if_promise_capability);
+ {
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Reject]] function.
+ Label if_exception(this, Label::kDeferred);
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Node* const reject =
+ LoadObjectField(payload, PromiseCapability::kRejectOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), reason);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
- BIND(&out);
- Return(UndefinedConstant());
+      // Don't rethrow the exception here; just report the message.
+ BIND(&if_exception);
+ TailCallRuntime(Runtime::kReportMessage, context, var_exception.value());
+ }
+ } else {
+    // We have to call out to the dedicated PromiseRejectReactionJob builtin
+    // here, instead of just doing the work inline, as otherwise the catch
+    // prediction in the debugger would be wrong; the prediction simply walks
+    // the stack and checks for certain builtins.
+ TailCallBuiltin(Builtins::kPromiseRejectReactionJob, context,
+ var_handler_result.value(), UndefinedConstant(), payload);
}
}
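+
+// A JS sketch of the two exits of PromiseReactionJob above: a handler's
+// return value takes the if_fulfill path, a thrown exception if_reject:
+//
+//   Promise.resolve(1)
+//       .then(v => v + 1)                          // if_fulfill, value 2
+//       .then(() => { throw new Error('boom'); })  // if_reject
+//       .catch(e => console.log(e.message));       // 'boom'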
-TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) {
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseFulfillReactionJob, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Node* const result =
- CallBuiltin(Builtins::kPromiseHandle, context, value, handler,
- deferred_promise, deferred_on_resolve, deferred_on_reject);
- Return(result);
+ PromiseReactionJob(context, value, handler, payload,
+ PromiseReaction::kFulfill);
}
-// ES#sec-promise.prototype.catch
-// Promise.prototype.catch ( onRejected )
-TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
- // 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = UndefinedConstant();
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseRejectReactionJob, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Label if_internalthen(this), if_customthen(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(promise), &if_customthen);
- BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
-
- BIND(&if_internalthen);
- {
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
- }
-
- BIND(&if_customthen);
- {
- Node* const then =
- GetProperty(context, promise, isolate()->factory()->then_string());
- Node* const result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, then, promise, on_resolve, on_reject);
- Return(result);
- }
+ PromiseReactionJob(context, reason, handler, payload,
+ PromiseReaction::kReject);
}
-TF_BUILTIN(PromiseResolveWrapper, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromiseResolveTrampoline, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* receiver = Parameter(Descriptor::kReceiver);
Node* value = Parameter(Descriptor::kValue);
@@ -1331,51 +1152,49 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
Node* constructor = Parameter(Descriptor::kConstructor);
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
+
+ CSA_ASSERT(this, IsJSReceiver(constructor));
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Label if_value_is_native_promise(this),
- if_value_or_constructor_are_not_native_promise(this),
- if_need_to_allocate(this);
+ Label if_slow_constructor(this, Label::kDeferred), if_need_to_allocate(this);
+ // Check if {value} is a JSPromise.
GotoIf(TaggedIsSmi(value), &if_need_to_allocate);
-
- // This shortcircuits the constructor lookups.
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_need_to_allocate);
-
- // This adds a fast path as non-subclassed native promises don't have
- // an observable constructor lookup.
- BranchIfFastPath(native_context, promise_fun, value,
- &if_value_is_native_promise,
- &if_value_or_constructor_are_not_native_promise);
-
- BIND(&if_value_is_native_promise);
- {
- GotoIfNot(WordEqual(promise_fun, constructor),
- &if_value_or_constructor_are_not_native_promise);
- Return(value);
- }
+ Node* const value_map = LoadMap(value);
+ GotoIfNot(IsJSPromiseMap(value_map), &if_need_to_allocate);
+
+ // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ Node* const promise_prototype =
+      LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+ &if_slow_constructor);
+ GotoIf(IsSpeciesProtectorCellInvalid(), &if_slow_constructor);
+
+ // If the {constructor} is the Promise function, we just immediately
+ // return the {value} here and don't bother wrapping it into a
+ // native Promise.
+ GotoIfNot(WordEqual(promise_fun, constructor), &if_slow_constructor);
+ Return(value);
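+
+  // A JS illustration of this fast return (MyPromise is just an example
+  // subclass):
+  //
+  //   const p = Promise.resolve(1);
+  //   Promise.resolve(p) === p;    // true: returned as-is above
+  //   class MyPromise extends Promise {}
+  //   MyPromise.resolve(p) === p;  // false: constructors differ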
// At this point, value and/or constructor are not native promises, but
// they could be of the same subclass.
- BIND(&if_value_or_constructor_are_not_native_promise);
+ BIND(&if_slow_constructor);
{
- Label if_return(this);
- Node* const xConstructor =
- GetProperty(context, value, isolate->factory()->constructor_string());
- BranchIfSameValue(xConstructor, constructor, &if_return,
- &if_need_to_allocate);
-
- BIND(&if_return);
+ Node* const value_constructor =
+ GetProperty(context, value, isolate()->factory()->constructor_string());
+ GotoIfNot(WordEqual(value_constructor, constructor), &if_need_to_allocate);
Return(value);
}
BIND(&if_need_to_allocate);
{
- Label if_nativepromise(this), if_notnativepromise(this);
+ Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
Branch(WordEqual(promise_fun, constructor), &if_nativepromise,
&if_notnativepromise);
@@ -1384,18 +1203,21 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
BIND(&if_nativepromise);
{
Node* const result = AllocateAndInitJSPromise(context);
- InternalResolvePromise(context, result, value);
+ CallBuiltin(Builtins::kResolvePromise, context, result, value);
Return(result);
}
BIND(&if_notnativepromise);
{
- Node* const capability = NewPromiseCapability(context, constructor);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, resolve, UndefinedConstant(), value);
+ CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
Node* const result =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -1429,17 +1251,6 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kPromiseExecutorAlreadyInvoked);
}
-// ES6 #sec-newpromisecapability
-TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
- Node* constructor = Parameter(Descriptor::kConstructor);
- Node* debug_event = Parameter(Descriptor::kDebugEvent);
- Node* context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
-
- Return(NewPromiseCapability(context, constructor, debug_event));
-}
-
TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* const receiver = Parameter(Descriptor::kReceiver);
@@ -1470,7 +1281,9 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
BIND(&if_custompromise);
{
// 3. Let promiseCapability be ? NewPromiseCapability(C).
- Node* const capability = NewPromiseCapability(context, receiver);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, receiver, debug_event);
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
@@ -1485,16 +1298,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
}
}
-TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const reason = Parameter(Descriptor::kReason);
- Node* const debug_event = Parameter(Descriptor::kDebugEvent);
- Node* const context = Parameter(Descriptor::kContext);
-
- InternalPromiseReject(context, promise, reason, debug_event);
- Return(UndefinedConstant());
-}
-
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
Node* on_finally, Node* constructor, Node* native_context) {
Node* const promise_context =
@@ -1565,16 +1368,11 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let valueThunk be equivalent to a function that returns value.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const value_thunk = CreateValueThunkFunction(value, native_context);
// 8. Return ? Invoke(promise, "then", « valueThunk »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, value_thunk);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, value_thunk));
}
TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
@@ -1627,35 +1425,44 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let thrower be equivalent to a function that throws reason.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const thrower = CreateThrowerFunction(reason, native_context);
// 8. Return ? Invoke(promise, "then", « thrower »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, thrower);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, thrower));
}
TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
// 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const on_finally = Parameter(Descriptor::kOnFinally);
Node* const context = Parameter(Descriptor::kContext);
// 2. If Type(promise) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject,
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"Promise.prototype.finally");
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Node* const constructor = SpeciesConstructor(context, promise, promise_fun);
+ VARIABLE(var_constructor, MachineRepresentation::kTagged, promise_fun);
+ Label slow_constructor(this, Label::kDeferred), done_constructor(this);
+ Node* const receiver_map = LoadMap(receiver);
+ GotoIfNot(IsJSPromiseMap(receiver_map), &slow_constructor);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, receiver_map,
+ &done_constructor, &slow_constructor);
+ BIND(&slow_constructor);
+ {
+ Node* const constructor =
+ SpeciesConstructor(context, receiver, promise_fun);
+ var_constructor.Bind(constructor);
+ Goto(&done_constructor);
+ }
+ BIND(&done_constructor);
+ Node* const constructor = var_constructor.value();
// 4. Assert: IsConstructor(C) is true.
CSA_ASSERT(this, IsConstructor(constructor));
@@ -1697,50 +1504,172 @@ TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
// 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
BIND(&perform_finally);
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, var_then_finally.value(),
- var_catch_finally.value());
- Return(result_promise);
+ Return(InvokeThen(native_context, receiver, var_then_finally.value(),
+ var_catch_finally.value()));
}
-TF_BUILTIN(ResolveNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-fulfillpromise
+TF_BUILTIN(FulfillPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
- InternalResolvePromise(context, promise, value);
- Return(UndefinedConstant());
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Node* const reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ PromiseSetStatus(promise, Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ Return(TriggerPromiseReactions(context, reactions, value,
+ PromiseReaction::kFulfill));
}
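+
+// A minimal JS sketch of the steps above; {reactionsOrResult} mirrors the
+// single kReactionsOrResultOffset field, which holds the reaction list
+// before fulfillment and the value afterwards (reactions shown as an
+// array here; V8 actually stores a linked list):
+//
+//   function fulfillPromise(promise, value) {
+//     const reactions = promise.reactionsOrResult;     // step 2
+//     promise.reactionsOrResult = value;               // steps 3-5
+//     promise.state = 'fulfilled';                     // step 6
+//     for (const r of reactions)                       // step 7
+//       queueMicrotask(() => r.fulfillHandler(value));
+//   }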
-TF_BUILTIN(RejectNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-rejectpromise
+TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const value = Parameter(Descriptor::kValue);
+ Node* const reason = Parameter(Descriptor::kReason);
Node* const debug_event = Parameter(Descriptor::kDebugEvent);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
CSA_ASSERT(this, IsBoolean(debug_event));
- InternalPromiseReject(context, promise, value, debug_event);
- Return(UndefinedConstant());
+ Label if_runtime(this, Label::kDeferred);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of round trips
+  // between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+  // We don't try to handle rejecting {promise} without a handler
+  // here, but let the C++ code take care of this case completely.
+ GotoIfNot(PromiseHasHandler(promise), &if_runtime);
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Node* reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ PromiseSetStatus(promise, Promise::kRejected);
+
+  // 8. Return TriggerPromiseReactions(reactions, reason).
+ Return(TriggerPromiseReactions(context, reactions, reason,
+ PromiseReaction::kReject));
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kRejectPromise, context, promise, reason,
+ debug_event);
}
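+
+// A JS illustration of the PromiseHasHandler() check above: a rejection
+// with no handler attached yet goes to the runtime, which performs
+// HostPromiseRejectionTracker for the embedder:
+//
+//   const p = Promise.reject(new Error('x'));  // no handler -> if_runtime
+//   p.catch(() => {});                         // later marks it as handled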
-TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const resolve_reaction = Parameter(Descriptor::kResolveReaction);
- Node* const reject_reaction = Parameter(Descriptor::kRejectReaction);
- Node* const result_promise = Parameter(Descriptor::kResultPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(result_promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
- InternalPerformPromiseThen(context, promise, resolve_reaction,
- reject_reaction, result_promise,
- UndefinedConstant(), UndefinedConstant());
- Return(result_promise);
+ Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred),
+ if_runtime(this, Label::kDeferred);
+ VARIABLE(var_reason, MachineRepresentation::kTagged);
+ VARIABLE(var_then, MachineRepresentation::kTagged);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of round trips
+  // between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 6. If SameValue(resolution, promise) is true, then
+ // We can use pointer comparison here, since the {promise} is guaranteed
+ // to be a JSPromise inside this function and thus is reference comparable.
+ GotoIf(WordEqual(promise, resolution), &if_runtime);
+
+ // 7. If Type(resolution) is not Object, then
+ GotoIf(TaggedIsSmi(resolution), &if_fulfill);
+ Node* const result_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
+ &if_slow);
+
+  // {resolution} is a native promise whose "then" lookup chain is intact,
+  // so we can load the initial Promise.prototype.then from the native
+  // context instead of performing an observable property lookup.
+ BIND(&if_fast);
+ {
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&if_slow);
+ {
+ // 8. Let then be Get(resolution, "then").
+ Node* const then =
+ GetProperty(context, resolution, isolate()->factory()->then_string());
+
+ // 9. If then is an abrupt completion, then
+ GotoIfException(then, &if_reject, &var_reason);
+
+ // 11. If IsCallable(thenAction) is false, then
+ GotoIf(TaggedIsSmi(then), &if_fulfill);
+ Node* const then_map = LoadMap(then);
+ GotoIfNot(IsCallableMap(then_map), &if_fulfill);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&do_enqueue);
+ {
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Node* const task = AllocatePromiseResolveThenableJobTask(
+ promise, var_then.value(), resolution, native_context);
+ TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task);
+ }
+
+ BIND(&if_fulfill);
+ {
+ // 7.b Return FulfillPromise(promise, resolution).
+ TailCallBuiltin(Builtins::kFulfillPromise, context, promise, resolution);
+ }
+
+ BIND(&if_runtime);
+ Return(CallRuntime(Runtime::kResolvePromise, context, promise, resolution));
+
+ BIND(&if_reject);
+ {
+ // 9.a Return RejectPromise(promise, then.[[Value]]).
+ TailCallBuiltin(Builtins::kRejectPromise, context, promise,
+ var_reason.value(), FalseConstant());
+ }
}
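+
+// A JS illustration of the SameValue(resolution, promise) check above;
+// self-resolution is left to the runtime, which rejects with a TypeError:
+//
+//   let resolve;
+//   const p = new Promise(r => { resolve = r; });
+//   resolve(p);  // resolution === promise -> if_runtime
+//   p.catch(e => console.log(e instanceof TypeError));  // true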
Node* PromiseBuiltinsAssembler::PerformPromiseAll(
@@ -1802,9 +1731,6 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve_context =
CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementAlreadyVisitedSlot,
- SmiConstant(0));
- StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
@@ -1944,7 +1870,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
Label reject_promise(this, &var_exception, Label::kDeferred);
@@ -1987,19 +1914,16 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
- Label already_called(this), resolve_promise(this);
- GotoIf(SmiEqual(LoadContextElement(
- context, kPromiseAllResolveElementAlreadyVisitedSlot),
- SmiConstant(1)),
- &already_called);
- StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementAlreadyVisitedSlot, SmiConstant(1));
-
Node* const index =
LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
Node* const values_array =
LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+ Label already_called(this, Label::kDeferred), resolve_promise(this);
+ GotoIf(SmiLessThan(index, SmiConstant(Smi::kZero)), &already_called);
+ StoreContextElementNoWriteBarrier(context, kPromiseAllResolveElementIndexSlot,
+ SmiConstant(-1));
+
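+  // Note: the index slot doubles as the already-called flag. A valid index
+  // is a non-negative Smi, and -1 means this resolve-element closure has
+  // already run (see kPromiseAllResolveElementIndexSlot in the header).
+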
// Set element in FixedArray
Label runtime_set_element(this), did_set_element(this);
GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
@@ -2070,7 +1994,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 366c7c22cd..2130101e84 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
-#define V8_BUILTINS_BUILTINS_PROMISE_H_
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/objects/promise.h"
namespace v8 {
namespace internal {
@@ -29,11 +30,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
enum PromiseAllResolveElementContextSlots {
- // Whether the resolve callback was already called.
- kPromiseAllResolveElementAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Index into the values array
- kPromiseAllResolveElementIndexSlot,
+ // Index into the values array, or -1 if the callback was already called
+ kPromiseAllResolveElementIndexSlot = Context::MIN_CONTEXT_SLOTS,
// Remaining elements count (mutable HeapNumber)
kPromiseAllResolveElementRemainingElementsSlot,
@@ -90,8 +88,16 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status,
Node* result);
- Node* AllocatePromiseResolveThenableJobInfo(Node* result, Node* then,
- Node* resolve, Node* reject,
+ Node* AllocatePromiseReaction(Node* next, Node* payload,
+ Node* fulfill_handler, Node* reject_handler);
+
+ Node* AllocatePromiseReactionJobTask(Heap::RootListIndex map_root_index,
+ Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve,
+ Node* then, Node* thenable,
Node* context);
std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
@@ -105,50 +111,44 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
Node* promise_capability);
- Node* NewPromiseCapability(Node* context, Node* constructor,
- Node* debug_event = nullptr);
-
protected:
void PromiseInit(Node* promise);
- Node* SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor);
-
void PromiseSetHasHandler(Node* promise);
void PromiseSetHandledHint(Node* promise);
- void AppendPromiseCallback(int offset, compiler::Node* promise,
- compiler::Node* value);
+ void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled,
+ Node* on_rejected,
+ Node* result_promise_or_capability);
- Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
- Node* on_reject);
-
- Node* InternalPerformPromiseThen(Node* context, Node* promise,
- Node* on_resolve, Node* on_reject,
- Node* deferred_promise,
- Node* deferred_on_resolve,
- Node* deferred_on_reject);
+ Node* CreatePromiseContext(Node* native_context, int slots);
- void InternalResolvePromise(Node* context, Node* promise, Node* result);
+ Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
+ PromiseReaction::Type type);
- void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
- Label* if_ismodified);
+  // We can shortcut the SpeciesConstructor on {promise_map} if its
+  // [[Prototype]] is the (initial) Promise.prototype and the @@species
+  // protector is intact, as that guards the lookup path for the "constructor"
+  // property on JSPromise instances which have the %PromisePrototype%.
+ void BranchIfPromiseSpeciesLookupChainIntact(Node* native_context,
+ Node* promise_map,
+ Label* if_fast, Label* if_slow);
- void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
- Label* if_isunmodified, Label* if_ismodified);
+ // We can skip the "then" lookup on {receiver_map} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then() protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ void BranchIfPromiseThenLookupChainIntact(Node* native_context,
+ Node* receiver_map, Label* if_fast,
+ Label* if_slow);
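+
+  // For example, either of the following invalidates the corresponding
+  // protector and forces the slow path (illustrative JS; the full set of
+  // invalidating operations is wider):
+  //
+  //   Object.defineProperty(Promise, Symbol.species, { value: undefined });
+  //   Promise.prototype.then = function () { /* ... */ };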
- Node* CreatePromiseContext(Node* native_context, int slots);
- void PromiseFulfill(Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status);
+ template <typename... TArgs>
+ Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
void BranchIfAccessCheckFailed(Node* context, Node* native_context,
Node* promise_constructor, Node* executor,
Label* if_noaccess);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- bool debug_event);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- Node* debug_event);
std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
Node* constructor,
Node* native_context);
@@ -174,9 +174,10 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const NodeGenerator& handled_by);
Node* PromiseStatus(Node* promise);
- void PerformFulfillClosure(Node* context, Node* value, bool should_resolve);
- private:
+ void PromiseReactionJob(Node* context, Node* argument, Node* handler,
+ Node* payload, PromiseReaction::Type type);
+
Node* IsPromiseStatus(Node* actual, v8::Promise::PromiseState expected);
void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
@@ -186,4 +187,4 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_PROMISE_H_
+#endif // V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 64e838d53a..fb35f48a15 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -73,22 +73,57 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) {
+ Comment("AllocateJSArrayForCodeStubArguments");
+
+ Label if_empty_array(this), allocate_js_array(this);
+  // Do not use AllocateJSArray since {elements} might end up in the large
+  // object space (LOS).
+ VARIABLE(elements, MachineRepresentation::kTagged);
+
+ TNode<Smi> length = ParameterToTagged(argc, mode);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &if_empty_array);
+ {
+ Label if_large_object(this, Label::kDeferred);
+ Node* allocated_elements = AllocateFixedArray(PACKED_ELEMENTS, argc, mode,
+ kAllowLargeObjectAllocation);
+ elements.Bind(allocated_elements);
+
+ VARIABLE(index, MachineType::PointerRepresentation(),
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&index}, zone());
+
+ GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
+ &if_large_object);
+ args.ForEach(list, [=, &index](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
+ index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+
+ BIND(&if_large_object);
+ {
+ args.ForEach(list, [=, &index](Node* arg) {
+ Store(allocated_elements, index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+ }
+ }
+
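+  // Note: a regular-sized FixedArray is freshly allocated in new space, so
+  // StoreNoWriteBarrier above is safe; an array in large object space is
+  // treated as old generation and therefore uses the write-barriered Store.
+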
+ BIND(&if_empty_array);
+ {
+ elements.Bind(EmptyFixedArrayConstant());
+ Goto(&allocate_js_array);
+ }
+
+ BIND(&allocate_js_array);
+ // Allocate the result JSArray.
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* argc_smi = ParameterToTagged(argc, mode);
-
- Node* array = AllocateJSArray(PACKED_ELEMENTS, array_map, argc, argc_smi,
- nullptr, mode);
- Node* elements = LoadElements(array);
-
- VARIABLE(index, MachineType::PointerRepresentation(),
- IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
- VariableList list({&index}, zone());
- args.ForEach(list, [=, &index](Node* arg) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, index.value(),
- arg);
- Increment(&index, kPointerSize);
- });
+ Node* array = AllocateUninitializedJSArrayWithoutElements(array_map, length);
+ StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset,
+ elements.value());
+
return array;
}
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 4227c628d1..45329eed70 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
@@ -135,10 +136,9 @@ void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const context, Node* const regexp, Node* const match_info,
- Node* const string) {
+ TNode<String> const string) {
CSA_ASSERT(this, IsFixedArrayMap(LoadMap(match_info)));
CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
Label named_captures(this), out(this);
@@ -152,7 +152,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- Node* const first = SubString(context, string, start, end);
+
+ TNode<String> const first = SubString(string, SmiUntag(start), SmiUntag(end));
Node* const result =
AllocateRegExpResult(context, num_results, start, string);
@@ -188,7 +189,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
- Node* const capture = SubString(context, string, start, end);
+ TNode<String> const capture =
+ SubString(string, SmiUntag(start), SmiUntag(end));
StoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -441,18 +443,11 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// contains the uninitialized sentinel as a smi.
Node* const code = var_code.value();
-#ifdef DEBUG
- {
- Label next(this);
- GotoIfNot(TaggedIsSmi(code), &next);
-
- CSA_ASSERT(this,
- SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)));
- Goto(&next);
-
- BIND(&next);
- }
-#endif
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ GotoIfNot(TaggedIsSmi(code), ok);
+ Branch(SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)), ok,
+ not_ok);
+ });
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
@@ -475,7 +470,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Argument 1: Previous index.
MachineType arg1_type = type_int32;
- Node* const arg1 = TruncateWordToWord32(int_last_index);
+ Node* const arg1 = TruncateIntPtrToInt32(int_last_index);
// Argument 2: Start of string data.
MachineType arg2_type = type_ptr;
@@ -582,7 +577,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
[=, &var_to_offset](Node* offset) {
Node* const value = Load(MachineType::Int32(),
static_offsets_vector_address, offset);
- Node* const smi_value = SmiFromWord32(value);
+ Node* const smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kPointerSize);
@@ -766,10 +761,9 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
- Node* const regexp,
- Node* const string,
- const bool is_fastpath) {
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
+ Node* const context, Node* const regexp, TNode<String> const string,
+ const bool is_fastpath) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_didnotmatch(this), out(this);
@@ -944,7 +938,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
// Slow path stub for RegExpPrototypeExec to decrease code size.
TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Return(RegExpPrototypeExecBody(context, regexp, string, false));
@@ -1030,7 +1024,7 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
@@ -1069,13 +1063,13 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- Goto(&next); \
- BIND(&next); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ Goto(&next); \
+ BIND(&next); \
} while (false)
CASE_FOR_FLAG(JSRegExp::kGlobal);
@@ -1099,8 +1093,8 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Label if_isflagset(this); \
BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
BIND(&if_isflagset); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- var_flags = Signed(WordOr(var_flags, IntPtrConstant(FLAG))); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
Goto(&next); \
BIND(&next); \
} while (false)
@@ -1118,7 +1112,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
// char for each set flag.
{
- Node* const result = AllocateSeqOneByteString(context, var_length);
+ Node* const result = AllocateSeqOneByteString(context, var_length.value());
VARIABLE(var_offset, MachineType::PointerRepresentation(),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1126,7 +1120,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
Node* const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
@@ -1384,8 +1378,7 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(IsUndefined(maybe_flags), &next);
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+ ThrowTypeError(context, MessageTemplate::kRegExpFlags);
BIND(&next);
}
@@ -1450,12 +1443,8 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
BIND(&if_isnotprototype);
{
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str =
- HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
- "RegExp.prototype.source"));
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
+ ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp,
+ "RegExp.prototype.source");
}
}
}
@@ -1465,7 +1454,7 @@ Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
JSRegExp::Flag flag) {
Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
Node* const mask = SmiConstant(flag);
- return SmiToWord32(SmiAnd(flags, mask));
+ return SmiToInt32(SmiAnd(flags, mask));
}
// Load through the GetProperty stub.
@@ -1533,8 +1522,6 @@ Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
JSRegExp::Flag flag, int counter,
const char* method_name) {
- Isolate* isolate = this->isolate();
-
// Check whether we have an unmodified regexp instance.
Label if_isunmodifiedjsregexp(this),
if_isnotunmodifiedjsregexp(this, Label::kDeferred);
@@ -1573,14 +1560,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
}
BIND(&if_isnotprototype);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str = HeapConstant(
- isolate->factory()->NewStringFromAsciiChecked(method_name));
- CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp, method_name); }
}
}
@@ -1707,7 +1687,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1795,163 +1775,14 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
return var_result.value();
}
-namespace {
-
-// Utility class implementing a growable fixed array through CSA.
-class GrowableFixedArray {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- public:
- explicit GrowableFixedArray(CodeStubAssembler* a)
- : assembler_(a),
- var_array_(a, MachineRepresentation::kTagged),
- var_length_(a, MachineType::PointerRepresentation()),
- var_capacity_(a, MachineType::PointerRepresentation()) {
- Initialize();
- }
-
- Node* length() const { return var_length_.value(); }
-
- Variable* var_array() { return &var_array_; }
- Variable* var_length() { return &var_length_; }
- Variable* var_capacity() { return &var_capacity_; }
-
- void Push(Node* const value) {
- CodeStubAssembler* a = assembler_;
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- Label grow(a), store(a);
- a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
-
- a->BIND(&grow);
- {
- Node* const new_capacity = NewCapacity(a, capacity);
- Node* const new_array = ResizeFixedArray(length, new_capacity);
-
- var_capacity_.Bind(new_capacity);
- var_array_.Bind(new_array);
- a->Goto(&store);
- }
-
- a->BIND(&store);
- {
- Node* const array = var_array_.value();
- a->StoreFixedArrayElement(array, length, value);
-
- Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
- var_length_.Bind(new_length);
- }
- }
-
- Node* ToJSArray(Node* const context) {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- Node* const native_context = a->LoadNativeContext(context);
- Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
-
- // Shrink to fit if necessary.
- {
- Label next(a);
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- a->GotoIf(a->WordEqual(length, capacity), &next);
-
- Node* const array = ResizeFixedArray(length, length);
- var_array_.Bind(array);
- var_capacity_.Bind(length);
- a->Goto(&next);
-
- a->BIND(&next);
- }
-
- Node* const result_length = a->SmiTag(length());
- Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
- array_map, result_length, nullptr);
-
- // Note: We do not currently shrink the fixed array.
-
- a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
-
- return result;
- }
-
- private:
- void Initialize() {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- static const int kInitialArraySize = 8;
- Node* const capacity = a->IntPtrConstant(kInitialArraySize);
- Node* const array = a->AllocateFixedArray(kind, capacity);
-
- a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
- Heap::kTheHoleValueRootIndex);
-
- var_array_.Bind(array);
- var_capacity_.Bind(capacity);
- var_length_.Bind(a->IntPtrConstant(0));
- }
-
- Node* NewCapacity(CodeStubAssembler* a,
- compiler::SloppyTNode<IntPtrT> current_capacity) {
- CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
-
-    // Growth rate is analogous to JSObject::NewElementsCapacity:
- // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
-
- Node* const new_capacity = a->IntPtrAdd(
- a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
- a->IntPtrConstant(16));
-
- return new_capacity;
- }
-
- // Creates a new array with {new_capacity} and copies the first
- // {element_count} elements from the current array.
- Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
- CodeStubAssembler* a = assembler_;
-
- CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
-
- Node* const from_array = var_array_.value();
-
- CodeStubAssembler::ExtractFixedArrayFlags flags;
- flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- Node* to_array = a->ExtractFixedArray(from_array, nullptr, element_count,
- new_capacity, flags);
-
- return to_array;
- }
-
- private:
- CodeStubAssembler* const assembler_;
- Variable var_array_;
- Variable var_length_;
- Variable var_capacity_;
-};
-
-} // namespace
-
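// A minimal scalar sketch of the growth policy used by NewCapacity above
// (illustrative only, not CSA code; the helper name is hypothetical):
#include <cstdint>

// Grow by roughly 1.5x plus a constant 16-slot pad, analogous to
// JSObject::NewElementsCapacity. Starting from the initial capacity of 8,
// this yields 8 -> 28 -> 58 -> 103 -> ...
int64_t NewGrowableArrayCapacity(int64_t current_capacity) {
  return current_capacity + (current_capacity >> 1) + 16;
}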
void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
const bool is_fastpath) {
- CSA_ASSERT(this, IsString(string));
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
-
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1975,7 +1806,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Allocate an array to store the resulting match strings.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
// Loop preparations. Within the loop, collect results from RegExpExec
// and store match strings in the array.
@@ -2001,9 +1832,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const match_to = LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- Node* match = SubString(context, string, match_from, match_to);
- var_match.Bind(match);
-
+ var_match.Bind(
+ SubString(string, SmiUntag(match_from), SmiUntag(match_to)));
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
@@ -2052,7 +1882,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Store the match, growing the fixed array if needed.
- array.Push(match);
+ array.Push(CAST(match));
// Advance last index if the match is the empty string.
@@ -2087,7 +1917,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Wrap the match in a JSArray.
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
}
@@ -2107,7 +1937,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2126,7 +1956,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
// 2) pattern is a string
TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kPattern);
+ TNode<String> const string = CAST(Parameter(Descriptor::kPattern));
Node* const context = Parameter(Descriptor::kContext);
RegExpPrototypeMatchBody(context, receiver, string, true);
@@ -2248,7 +2078,7 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2277,12 +2107,11 @@ TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
// JSRegExp, {string} is a String, and {limit} is a Smi.
void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
Node* const limit) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
CSA_ASSERT(this, TaggedIsSmi(limit));
- CSA_ASSERT(this, IsString(string));
TNode<Smi> const smi_zero = SmiConstant(0);
TNode<IntPtrT> const int_zero = IntPtrConstant(0);
@@ -2343,7 +2172,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Loop preparations.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
VARIABLE(var_last_matched_until, MachineRepresentation::kTagged);
VARIABLE(var_next_search_from, MachineRepresentation::kTagged);
@@ -2422,10 +2251,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = last_matched_until;
Node* const to = match_from;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
GotoIf(WordEqual(array.length(), int_limit), &out);
}
@@ -2462,21 +2288,19 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&select_capture);
{
- Node* const substr = SubString(context, string, from, to);
- var_value.Bind(substr);
+ var_value.Bind(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&store_value);
}
BIND(&select_undefined);
{
- Node* const undefined = UndefinedConstant();
- var_value.Bind(undefined);
+ var_value.Bind(UndefinedConstant());
Goto(&store_value);
}
BIND(&store_value);
{
- array.Push(var_value.value());
+ array.Push(CAST(var_value.value()));
GotoIf(WordEqual(array.length(), int_limit), &out);
Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
@@ -2499,16 +2323,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = var_last_matched_until.value();
Node* const to = string_length;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&out);
}
BIND(&out);
{
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
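// A rough scalar sketch of the split fast path's control flow, with a
// literal pattern standing in for the regexp (capture groups and empty-match
// advancement omitted; illustrative only):
#include <string>
#include <vector>

// Collect the substrings between matches, stopping once {limit} entries have
// been produced, then append the unmatched tail.
std::vector<std::string> SplitSketch(const std::string& subject,
                                     const std::string& pattern,
                                     size_t limit) {
  std::vector<std::string> parts;
  size_t last = 0, pos;
  while (!pattern.empty() && parts.size() < limit &&
         (pos = subject.find(pattern, last)) != std::string::npos) {
    parts.push_back(subject.substr(last, pos - last));
    last = pos + pattern.size();
  }
  if (parts.size() < limit) parts.push_back(subject.substr(last));
  return parts;
}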
@@ -2525,12 +2346,11 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const maybe_limit = Parameter(Descriptor::kLimit);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
// TODO(jgruber): Even if map checks send us to the fast path, we still need
// to verify the constructor property and jump to the slow path if it has
@@ -2600,7 +2420,7 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
@@ -2700,9 +2520,9 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Goto(&loop);
BIND(&loop);
{
- GotoIfNot(IntPtrLessThan(var_i, end), &create_result);
+ GotoIfNot(IntPtrLessThan(var_i.value(), end), &create_result);
- Node* const elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const elem = LoadFixedArrayElement(res_elems, var_i.value());
Label if_issmi(this), if_isstring(this), loop_epilogue(this);
Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
@@ -2726,9 +2546,10 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_isnegativeorzero);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
- Node* const next_elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const next_elem =
+ LoadFixedArrayElement(res_elems, var_i.value());
var_match_start = SmiSub(next_elem, elem);
Goto(&loop_epilogue);
@@ -2740,13 +2561,14 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
- TNode<Smi> match_start = var_match_start;
+ TNode<Smi> match_start = var_match_start.value();
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
match_start, string);
- Node* const replacement_str = ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, var_i, replacement_str);
+ TNode<String> const replacement_str =
+ ToString_Inline(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, var_i.value(), replacement_str);
TNode<Smi> const elem_length = LoadStringLengthAsSmi(elem);
var_match_start = SmiAdd(match_start, elem_length);
@@ -2756,7 +2578,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&loop_epilogue);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
Goto(&loop);
}
}
@@ -2795,7 +2617,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// Overwrite the i'th element in the results with the string
// we got back from the callback function.
- Node* const replacement_str =
+ TNode<String> const replacement_str =
ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, index, replacement_str);
@@ -2821,20 +2643,19 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
}
Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
- Node* context, Node* regexp, Node* string, Node* replace_string) {
+ Node* context, Node* regexp, TNode<String> string,
+ TNode<String> replace_string) {
// The fast path is reached only if {receiver} is an unmodified
// JSRegExp instance, {replace_value} is non-callable, and
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
+ CSA_ASSERT(this, IsFastRegExp(context, regexp));
+
Node* const smi_zero = SmiConstant(0);
const bool kIsFastPath = true;
- CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(replace_string));
- CSA_ASSERT(this, IsString(string));
-
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ TVARIABLE(String, var_result, EmptyStringConstant());
VARIABLE(var_match_indices, MachineRepresentation::kTagged);
VARIABLE(var_last_match_end, MachineRepresentation::kTagged, smi_zero);
VARIABLE(var_is_unicode, MachineRepresentation::kWord32, Int32Constant(0));
@@ -2871,22 +2692,21 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* const result = StringAdd(context, var_result.value(), first_part);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ var_result = StringAdd(context, var_result.value(), first_part);
Goto(&loop_end);
}
BIND(&if_replaceisnotempty);
{
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* result = StringAdd(context, var_result.value(), first_part);
- result = StringAdd(context, result, replace_string);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ TNode<String> result =
+ StringAdd(context, var_result.value(), first_part);
+ var_result = StringAdd(context, result, replace_string);
Goto(&loop_end);
}
@@ -2910,10 +2730,9 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
BIND(&if_nofurthermatches);
{
TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
- Node* const last_part =
- SubString(context, string, var_last_match_end.value(), string_length);
- Node* const result = StringAdd(context, var_result.value(), last_part);
- var_result.Bind(result);
+ TNode<String> const last_part = SubString(
+ string, SmiUntag(var_last_match_end.value()), SmiUntag(string_length));
+ var_result = StringAdd(context, var_result.value(), last_part);
Goto(&out);
}
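// A rough scalar sketch of this function's loop structure, with a literal
// pattern standing in for the regexp (illustrative only): stitch together
// the unmatched prefix before each match, the replacement, and the tail.
#include <string>

std::string ReplaceAllSketch(const std::string& subject,
                             const std::string& pattern,
                             const std::string& replacement) {
  std::string result;
  size_t last_match_end = 0, pos;
  while (!pattern.empty() &&
         (pos = subject.find(pattern, last_match_end)) != std::string::npos) {
    result += subject.substr(last_match_end, pos - last_match_end);
    result += replacement;  // the CSA code special-cases an empty replacement
    last_match_end = pos + pattern.size();
  }
  return result + subject.substr(last_match_end);  // if_nofurthermatches
}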
@@ -2924,12 +2743,11 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const replace_value = Parameter(Descriptor::kReplaceValue);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
Label checkreplacestring(this), if_iscallable(this),
runtime(this, Label::kDeferred);
@@ -2942,7 +2760,8 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// 3. Does ToString({replace_value}) contain '$'?
BIND(&checkreplacestring);
{
- Node* const replace_string = ToString_Inline(context, replace_value);
+ TNode<String> const replace_string =
+ ToString_Inline(context, replace_value);
// ToString(replaceValue) could potentially change the shape of the RegExp
// object. Recheck that we are still on the fast path and bail to runtime
@@ -3028,7 +2847,7 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
Label stub(this), runtime(this, Label::kDeferred);
@@ -3046,27 +2865,19 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// Simple string matching functionality for internal use which does not modify
// the last match info.
TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Node* const smi_zero = SmiConstant(0);
-
- CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
-
Node* const native_context = LoadNativeContext(context);
Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
-
Node* const match_indices = RegExpExecInternal(context, regexp, string,
smi_zero, internal_match_info);
-
Node* const null = NullConstant();
- Label if_matched(this), if_didnotmatch(this);
- Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
-
- BIND(&if_didnotmatch);
+ Label if_matched(this);
+ GotoIfNot(WordEqual(match_indices, null), &if_matched);
Return(null);
BIND(&if_matched);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index c8a94b7293..b57b90acf9 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_REGEXP_H_
-#define V8_BUILTINS_BUILTINS_REGEXP_H_
+#ifndef V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
+#define V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
#include "src/code-stub-assembler.h"
@@ -50,7 +50,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
Node* const match_info,
- Node* const string);
+ TNode<String> const string);
Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
Node* const regexp,
@@ -58,7 +58,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Label* if_didnotmatch,
const bool is_fastpath);
Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> string, const bool is_fastpath);
Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
MessageTemplate::Template msg_template,
@@ -100,7 +100,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const is_unicode, bool is_fastpath);
void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> const string,
+ const bool is_fastpath);
void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
Node* const string);
@@ -108,15 +109,16 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const string);
void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
- Node* const string, Node* const limit);
+ TNode<String> const string, Node* const limit);
Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
Node* replace_callable);
- Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_string);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp,
+ TNode<String> string,
+ TNode<String> replace_string);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_REGEXP_H_
+#endif // V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 278a48c68e..2c9f0791da 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -69,9 +69,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
BIND(&invalid);
{
- CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
- tagged);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
+ tagged);
}
BIND(&not_float_or_clamped);
@@ -96,15 +95,12 @@ Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
// The |number_index| output parameter is used only for architectures that
// don't currently have a TF implementation and forward to runtime functions
// instead; they expect the value has already been coerced to an integer.
- *number_index = ToSmiIndex(tagged, context, &range_error);
- var_result.Bind(SmiToWord32(*number_index));
+ *number_index = ToSmiIndex(CAST(tagged), CAST(context), &range_error);
+ var_result.Bind(SmiToInt32(*number_index));
Goto(&done);
BIND(&range_error);
- {
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }
BIND(&done);
return var_result.value();
@@ -119,8 +115,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
context, LoadObjectField(array, JSTypedArray::kLengthOffset));
GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
BIND(&check_passed);
}
@@ -169,20 +164,20 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(
- AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+ Return(
+ SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
BIND(&u8);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
BIND(&i16);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(SmiFromWord32(AtomicLoad(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1))));
BIND(&i32);
Return(ChangeInt32ToTagged(
@@ -293,20 +288,20 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
@@ -371,22 +366,22 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicCompareExchange(MachineType::Int8(), backing_store,
- index_word, old_value_word32,
- new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&u8);
- Return(SmiFromWord32(
- AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word,
- old_value_word32, new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Int16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Uint16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
@@ -468,22 +463,20 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 195572de8e..5cc4621b84 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -124,42 +124,6 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
return IntPtrAdd(string_data, offset_in_bytes);
}
-void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
- Node* context, Variable* var_start, Node* start, Node* string_length) {
- TNode<Object> const start_int = ToInteger_Inline(
- CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
- TNode<Smi> const zero = SmiConstant(0);
-
- Label done(this);
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- TNode<Smi> const start_int_smi = CAST(start_int);
- var_start->Bind(Select(
- SmiLessThan(start_int_smi, zero),
- [&] { return SmiMax(SmiAdd(string_length, start_int_smi), zero); },
- [&] { return start_int_smi; }, MachineRepresentation::kTagged));
- Goto(&done);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {start} is a heap number, it is definitely out of bounds. If it is
-    // negative, {start} = max({string_length} + {start}, 0) = 0. If it is
- // positive, set {start} to {string_length} which ultimately results in
- // returning an empty string.
- TNode<HeapNumber> const start_int_hn = CAST(start_int);
- TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const start_float = LoadHeapNumberValue(start_int_hn);
- var_start->Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(start_float, float_zero), zero, string_length));
- Goto(&done);
- }
- BIND(&done);
-}
-
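// A scalar sketch of the start-argument clamping removed here (its role is
// taken over by ConvertToRelativeIndex; illustrative only, and the Smi check
// is approximated by an int32 range test):
#include <algorithm>
#include <cstdint>

// In-range (Smi) starts resolve relative to the end when negative; the
// heap-number path is definitely out of bounds and clamps to 0 or to the
// string length.
int64_t ClampStartSketch(double start, int64_t string_length) {
  if (start >= INT32_MIN && start <= INT32_MAX) {  // stands in for TaggedIsSmi
    int64_t s = static_cast<int64_t>(start);
    return s < 0 ? std::max(string_length + s, int64_t{0}) : s;
  }
  return start < 0 ? 0 : string_length;  // out-of-bounds heap number
}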
void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* right) {
VARIABLE(var_left, MachineRepresentation::kTagged, left);
@@ -300,21 +264,23 @@ void StringBuiltinsAssembler::StringEqual_Loop(
{
// If {offset} equals {end}, no difference was found, so the
// strings are equal.
- GotoIf(WordEqual(var_offset, length), if_equal);
+ GotoIf(WordEqual(var_offset.value(), length), if_equal);
// Load the next characters from {lhs} and {rhs}.
Node* lhs_value =
Load(lhs_type, lhs_data,
- WordShl(var_offset, ElementSizeLog2Of(lhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(lhs_type.representation())));
Node* rhs_value =
Load(rhs_type, rhs_data,
- WordShl(var_offset, ElementSizeLog2Of(rhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(rhs_type.representation())));
// Check if the characters match.
GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
Goto(&loop);
}
}
@@ -408,13 +374,13 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
{
// Check if {offset} equals {end}.
Label if_done(this), if_notdone(this);
- Branch(WordEqual(var_offset, end), &if_done, &if_notdone);
+ Branch(WordEqual(var_offset.value(), end), &if_done, &if_notdone);
BIND(&if_notdone);
{
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset);
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset.value());
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset.value());
// Check if the characters match.
Label if_valueissame(this), if_valueisnotsame(this);
@@ -424,7 +390,7 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
BIND(&if_valueissame);
{
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
}
Goto(&loop);
@@ -563,20 +529,21 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
-
+ // TODO(sigurds) Figure out if passing length as argument pays off.
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
- TNode<Int32T> code = StringCharCodeAt(receiver, position);
-
+ TNode<Int32T> code =
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
-TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
@@ -587,7 +554,7 @@ TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
@@ -648,11 +615,12 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// The {code16} fits into the SeqOneByteString {one_byte_result}.
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT8_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
+ var_max_index.value(), UINT8_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
});
arguments.PopAndReturn(one_byte_result);
@@ -667,16 +635,17 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// their corresponding positions in the new 16-bit string.
TNode<IntPtrT> zero = IntPtrConstant(0);
CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
- var_max_index, String::ONE_BYTE_ENCODING,
+ var_max_index.value(), String::ONE_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Node* max_index_offset =
+ ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
max_index_offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
@@ -689,14 +658,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS,
+ var_max_index.value(), UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
},
- var_max_index);
+ var_max_index.value());
arguments.PopAndReturn(two_byte_result);
}
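// A scalar sketch of the one-byte fast path and its two-byte fallback above
// (illustrative only): stay in Latin-1 until some code unit needs 16 bits,
// then widen what was copied so far and continue in two-byte mode.
#include <cstdint>
#include <string>
#include <vector>

std::u16string FromCharCodesSketch(const std::vector<uint16_t>& codes) {
  std::string one_byte;  // Latin-1, one byte per character
  size_t i = 0;
  for (; i < codes.size() && codes[i] <= 0xFF; ++i)
    one_byte.push_back(static_cast<char>(codes[i]));
  // The real builtin returns the one-byte string directly when no code unit
  // faulted; this sketch always widens for brevity.
  std::u16string result;
  for (char c : one_byte) result.push_back(static_cast<unsigned char>(c));
  for (; i < codes.size(); ++i) result.push_back(codes[i]);  // two-byte tail
  return result;
}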
@@ -728,7 +697,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
Node* value = StringCharCodeAt(receiver, index);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -742,9 +711,11 @@ TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
maybe_position, UndefinedConstant(),
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
+        // This is always a call to a builtin from JavaScript,
+ // so we need to produce UTF32.
Node* value = LoadSurrogatePairAt(receiver, length, index,
UnicodeEncoding::UTF32);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -1044,8 +1015,8 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Branch(IsNullOrUndefined(value), &throw_exception, &out);
BIND(&throw_exception);
- TailCallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- StringConstant(method_name));
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ method_name);
BIND(&out);
}
@@ -1173,8 +1144,8 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
Node* const matched =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_start_index, match_end_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_start_index), SmiUntag(match_end_index));
Node* const replacement_string =
CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
match_start_index, replace_string, dollar_index);
@@ -1242,11 +1213,10 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
BIND(&invalid_count);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidCountValue),
- var_count.value());
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidCountValue,
+ var_count.value());
}
+
BIND(&invalid_string_length);
{
CallRuntime(Runtime::kThrowInvalidStringLength, context);
@@ -1288,7 +1258,7 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
{
{
Label next(this);
- GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
+ GotoIfNot(SmiToInt32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
var_temp.value()));
Goto(&next);
@@ -1412,8 +1382,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
GotoIf(SmiEqual(match_start_index, smi_zero), &next);
Node* const prefix =
- CallBuiltin(Builtins::kSubString, context, subject_string, smi_zero,
- match_start_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ IntPtrConstant(0), SmiUntag(match_start_index));
var_result.Bind(prefix);
Goto(&next);
@@ -1453,8 +1423,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&out);
{
Node* const suffix =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_end_index, subject_length);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_end_index), SmiUntag(subject_length));
Node* const result =
CallStub(stringadd_callable, context, var_result.value(), suffix);
Return(result);
@@ -1587,14 +1557,15 @@ class StringPadAssembler : public StringBuiltinsAssembler {
GotoIf(IsUndefined(fill), &pad);
var_fill_string = ToString_Inline(context, fill);
- var_fill_length = LoadStringLengthAsWord(var_fill_string);
+ var_fill_length = LoadStringLengthAsWord(var_fill_string.value());
- Branch(IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)), &pad,
- &dont_pad);
+ Branch(IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)),
+ &pad, &dont_pad);
}
BIND(&pad);
{
- CSA_ASSERT(this, IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)));
CSA_ASSERT(this, SmiGreaterThan(max_length, string_length));
Callable stringadd_callable =
@@ -1604,38 +1575,37 @@ class StringPadAssembler : public StringBuiltinsAssembler {
VARIABLE(var_pad, MachineRepresentation::kTagged);
Label single_char_fill(this), multi_char_fill(this), return_result(this);
- Branch(IntPtrEqual(var_fill_length, IntPtrConstant(1)), &single_char_fill,
- &multi_char_fill);
+ Branch(IntPtrEqual(var_fill_length.value(), IntPtrConstant(1)),
+ &single_char_fill, &multi_char_fill);
// Fast path for a single character fill. No need to calculate number of
// repetitions or remainder.
BIND(&single_char_fill);
{
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
+ static_cast<Node*>(var_fill_string.value()),
pad_length));
Goto(&return_result);
}
BIND(&multi_char_fill);
{
TNode<Int32T> const fill_length_word32 =
- TruncateWordToWord32(var_fill_length);
- TNode<Int32T> const pad_length_word32 = SmiToWord32(pad_length);
+ TruncateIntPtrToInt32(var_fill_length.value());
+ TNode<Int32T> const pad_length_word32 = SmiToInt32(pad_length);
TNode<Int32T> const repetitions_word32 =
Int32Div(pad_length_word32, fill_length_word32);
TNode<Int32T> const remaining_word32 =
Int32Mod(pad_length_word32, fill_length_word32);
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
- SmiFromWord32(repetitions_word32)));
+ var_fill_string.value(),
+ SmiFromInt32(repetitions_word32)));
GotoIfNot(remaining_word32, &return_result);
{
- Node* const remainder_string =
- CallBuiltin(Builtins::kSubString, context,
- static_cast<Node*>(var_fill_string), SmiConstant(0),
- SmiFromWord32(remaining_word32));
+ Node* const remainder_string = CallBuiltin(
+ Builtins::kStringSubstring, context, var_fill_string.value(),
+ IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32));
var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(),
remainder_string));
Goto(&return_result);
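// A scalar sketch of the multi-character pad computation above (illustrative
// only; the surrounding code guarantees a non-empty fill string here):
#include <string>

// Whole repetitions of the fill string plus a prefix of it for the
// remainder, e.g. MakePadSketch("ab", 5) == "ababa".
std::string MakePadSketch(const std::string& fill, size_t pad_length) {
  std::string pad;
  for (size_t i = 0; i < pad_length / fill.size(); ++i) pad += fill;
  return pad + fill.substr(0, pad_length % fill.size());
}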
@@ -1679,8 +1649,8 @@ TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
Label out(this);
- VARIABLE(var_start, MachineRepresentation::kTagged);
- VARIABLE(var_end, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_start);
+ TVARIABLE(IntPtrT, var_end);
const int kStart = 0;
const int kEnd = 1;
@@ -1688,69 +1658,38 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStart);
- TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStart);
+ TNode<Object> end = args.GetOptionalArgumentValue(kEnd);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- TNode<Smi> const smi_zero = SmiConstant(0);
-
// 1. Let O be ? RequireObjectCoercible(this value).
RequireObjectCoercible(context, receiver, "String.prototype.slice");
// 2. Let S be ? ToString(O).
- Node* const subject_string =
- CallBuiltin(Builtins::kToString, context, receiver);
+ TNode<String> const subject_string =
+ CAST(CallBuiltin(Builtins::kToString, context, receiver));
// 3. Let len be the number of elements in S.
- TNode<Smi> const length = LoadStringLengthAsSmi(subject_string);
+ TNode<IntPtrT> const length = LoadStringLengthAsWord(subject_string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, length);
// 5. If end is undefined, let intEnd be len;
- var_end.Bind(length);
+ var_end = length;
GotoIf(IsUndefined(end), &out);
- // else let intEnd be ? ToInteger(end).
- Node* const end_int =
- ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
-
- // 7. If intEnd < 0, let to be max(len + intEnd, 0);
- // otherwise let to be min(intEnd, len).
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(end_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- Node* const length_plus_end = SmiAdd(length, end_int);
- var_end.Bind(Select(SmiLessThan(end_int, smi_zero),
- [&] { return SmiMax(length_plus_end, smi_zero); },
- [&] { return SmiMin(length, end_int); },
- MachineRepresentation::kTagged));
- Goto(&out);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {end} is a heap number, it is definitely out of bounds. If it is
-    // negative, {int_end} = max({length} + {int_end}, 0) = 0. If it is
- // positive, set {int_end} to {length} which ultimately results in
- // returning an empty string.
- Node* const float_zero = Float64Constant(0.);
- Node* const end_float = LoadHeapNumberValue(end_int);
- var_end.Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(end_float, float_zero), smi_zero, length));
- Goto(&out);
- }
+ // Convert {end} to a relative index.
+ var_end = ConvertToRelativeIndex(context, end, length);
+ Goto(&out);
Label return_emptystring(this);
BIND(&out);
{
- GotoIf(SmiLessThanOrEqual(var_end.value(), var_start.value()),
+ GotoIf(IntPtrLessThanOrEqual(var_end.value(), var_start.value()),
&return_emptystring);
- Node* const result =
- SubString(context, subject_string, var_start.value(), var_end.value(),
- SubStringFlags::FROM_TO_ARE_BOUNDED);
+ TNode<String> const result =
+ SubString(subject_string, var_start.value(), var_end.value());
args.PopAndReturn(result);
}
@@ -1868,25 +1807,25 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStartArg);
- TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStartArg);
+ TNode<Object> length = args.GetOptionalArgumentValue(kLengthArg);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label out(this);
- TVARIABLE(Smi, var_start);
+ TVARIABLE(IntPtrT, var_start);
TVARIABLE(Number, var_length);
- TNode<Smi> const zero = SmiConstant(0);
+ TNode<IntPtrT> const zero = IntPtrConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substr");
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, string_length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, string_length);
// Conversions and bounds-checks for {length}.
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
@@ -1897,7 +1836,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Branch(IsUndefined(length), &if_isundefined, &if_isnotundefined);
BIND(&if_isundefined);
- var_length = string_length;
+ var_length = SmiTag(string_length);
Goto(&if_issmi);
BIND(&if_isnotundefined);
@@ -1905,18 +1844,20 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubAssembler::kTruncateMinusZero);
}
- TVARIABLE(Smi, var_result_length);
+ TVARIABLE(IntPtrT, var_result_length);
- Branch(TaggedIsSmi(var_length), &if_issmi, &if_isheapnumber);
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
BIND(&if_issmi);
{
- TNode<Smi> const positive_length = SmiMax(CAST(var_length), zero);
- TNode<Smi> const minimal_length = SmiSub(string_length, var_start);
- var_result_length = SmiMin(positive_length, minimal_length);
+ TNode<IntPtrT> const positive_length =
+ IntPtrMax(SmiUntag(CAST(var_length.value())), zero);
+ TNode<IntPtrT> const minimal_length =
+ IntPtrSub(string_length, var_start.value());
+ var_result_length = IntPtrMin(positive_length, minimal_length);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
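// A scalar sketch of the clamp above (illustrative only): negative lengths
// produce an empty result, and the slice never runs past the end.
#include <algorithm>
#include <cstdint>

int64_t SubstrResultLengthSketch(int64_t length, int64_t string_length,
                                 int64_t start) {
  return std::min(std::max(length, int64_t{0}), string_length - start);
}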
@@ -1926,11 +1867,12 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumber(var_length));
+ CSA_ASSERT(this, IsHeapNumber(var_length.value()));
Label if_isnegative(this), if_ispositive(this);
TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const length_float = LoadHeapNumberValue(CAST(var_length));
+ TNode<Float64T> const length_float =
+ LoadHeapNumberValue(CAST(var_length.value()));
Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
&if_ispositive);
@@ -1939,17 +1881,17 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
BIND(&if_ispositive);
{
- var_result_length = SmiSub(string_length, var_start);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ var_result_length = IntPtrSub(string_length, var_start.value());
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
}
BIND(&out);
{
- TNode<Smi> const end = SmiAdd(var_start, var_result_length);
- Node* const result = SubString(context, string, var_start, end);
- args.PopAndReturn(result);
+ TNode<IntPtrT> const end =
+ IntPtrAdd(var_start.value(), var_result_length.value());
+ args.PopAndReturn(SubString(string, var_start.value(), end));
}
}
@@ -1959,7 +1901,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
Label out(this);
TVARIABLE(Smi, var_result);
- TNode<Object> const value_int =
+ TNode<Number> const value_int =
ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
@@ -1967,8 +1909,9 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
BIND(&if_issmi);
{
+ TNode<Smi> value_smi = CAST(value_int);
Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
- Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+ Branch(SmiAbove(value_smi, limit), &if_isoutofbounds, &if_isinbounds);
BIND(&if_isinbounds);
{
@@ -1980,7 +1923,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
{
TNode<Smi> const zero = SmiConstant(0);
var_result =
- SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit);
+ SelectTaggedConstant(SmiLessThan(value_smi, zero), zero, limit);
Goto(&out);
}
}
@@ -1999,16 +1942,15 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
}
BIND(&out);
- return var_result;
+ return var_result.value();
}
-TF_BUILTIN(SubString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* string = Parameter(Descriptor::kString);
- Node* from = Parameter(Descriptor::kFrom);
- Node* to = Parameter(Descriptor::kTo);
+TF_BUILTIN(StringSubstring, CodeStubAssembler) {
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
+ TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
- Return(SubString(context, string, from, to));
+ Return(SubString(string, from, to));
}
// ES6 #sec-string.prototype.substring
@@ -2031,7 +1973,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
VARIABLE(var_end, MachineRepresentation::kTagged);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substring");
Node* const length = LoadStringLengthAsSmi(string);
@@ -2061,9 +2003,8 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
BIND(&out);
{
- Node* result =
- SubString(context, string, var_start.value(), var_end.value());
- args.PopAndReturn(result);
+ args.PopAndReturn(SubString(string, SmiUntag(var_start.value()),
+ SmiUntag(var_end.value())));
}
}
@@ -2072,14 +2013,14 @@ TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
Generate(String::kTrim, "String.prototype.trim");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimLeft, StringTrimAssembler) {
- Generate(String::kTrimLeft, "String.prototype.trimLeft");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
+ Generate(String::kTrimStart, "String.prototype.trimLeft");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimRight, StringTrimAssembler) {
- Generate(String::kTrimRight, "String.prototype.trimRight");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
+ Generate(String::kTrimEnd, "String.prototype.trimRight");
}
void StringTrimAssembler::Generate(String::TrimMode mode,
@@ -2092,7 +2033,7 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
Node* const receiver = arguments.GetReceiver();
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string = ToThisString(context, receiver, method_name);
+ TNode<String> const string = ToThisString(context, receiver, method_name);
TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
ToDirectStringAssembler to_direct(state(), string);
@@ -2105,20 +2046,20 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
- if (mode == String::kTrimLeft || mode == String::kTrim) {
+ if (mode == String::kTrimStart || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
is_stringonebyte, &var_start,
string_length, 1, &return_emptystring);
}
- if (mode == String::kTrimRight || mode == String::kTrim) {
+ if (mode == String::kTrimEnd || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(
string_data, string_data_offset, is_stringonebyte, &var_end,
IntPtrConstant(-1), -1, &return_emptystring);
}
- arguments.PopAndReturn(SubString(context, string, SmiTag(var_start),
- SmiAdd(SmiTag(var_end), SmiConstant(1)),
- SubStringFlags::FROM_TO_ARE_BOUNDED));
+ arguments.PopAndReturn(
+ SubString(string, var_start.value(),
+ IntPtrAdd(var_end.value(), IntPtrConstant(1))));
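// A scalar sketch of the trim scan above (ASCII whitespace only; the CSA
// code also handles Unicode white space and line terminators): advance
// {start} forward, retreat {end} backward, then take the middle.
#include <cctype>
#include <string>

std::string TrimSketch(const std::string& s) {
  size_t start = 0;
  size_t end = s.size();  // one past the last character kept
  while (start < end && std::isspace(static_cast<unsigned char>(s[start])))
    ++start;
  while (end > start && std::isspace(static_cast<unsigned char>(s[end - 1])))
    --end;
  return s.substr(start, end - start);
}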
BIND(&if_runtime);
arguments.PopAndReturn(
@@ -2281,21 +2222,21 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
var_result = StringCharCodeAt(string, index);
var_trail = Int32Constant(0);
- GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
+ GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
TNode<IntPtrT> next_index = IntPtrAdd(index, IntPtrConstant(1));
GotoIfNot(IntPtrLessThan(next_index, length), &return_result);
var_trail = StringCharCodeAt(string, next_index);
- Branch(Word32Equal(Word32And(var_trail, Int32Constant(0xFC00)),
+ Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
&handle_surrogate_pair, &return_result);
BIND(&handle_surrogate_pair);
{
- TNode<Int32T> lead = var_result;
- TNode<Int32T> trail = var_trail;
+ TNode<Int32T> lead = var_result.value();
+ TNode<Int32T> trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
CSA_SLOW_ASSERT(this,
@@ -2332,7 +2273,7 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
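// A scalar sketch of the surrogate-pair classification and combination used
// above (standard UTF-16 math; illustrative only):
#include <cstdint>

bool IsLeadSurrogate(uint16_t u) { return (u & 0xFC00) == 0xD800; }
bool IsTrailSurrogate(uint16_t u) { return (u & 0xFC00) == 0xDC00; }

// The UTF32 case: combine a lead/trail pair into a single code point.
uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
  return 0x10000u + ((lead - 0xD800u) << 10) + (trail - 0xDC00u);
}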
// ES6 #sec-%stringiteratorprototype%.next
@@ -2383,9 +2324,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("String Iterator.prototype.next"), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("String Iterator.prototype.next"), iterator);
}
}
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index b830a8597d..2a4f23b003 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-typedarray-gen.h"
+
+#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
namespace v8 {
@@ -23,106 +27,22 @@ using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
- const char* method_name,
- int object_offset);
- void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
- const char* method_name,
- IterationKind iteration_kind);
-
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
- TNode<Number> byte_offset, TNode<Number> byte_length);
- void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
- TNode<Map> map, TNode<Smi> length,
- TNode<Number> byte_offset);
-
- TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
- TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
- TNode<Number> byte_offset);
- Node* LoadDataPtr(Node* typed_array);
- TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
-
- // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
- TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
-
-  // Loads the element kind of a TypedArray instance.
- TNode<Word32T> LoadElementsKind(TNode<Object> typed_array);
-
- // Returns the byte size of an element for a TypedArray elements kind.
- TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
-
- // Fast path for setting a TypedArray (source) onto another TypedArray
- // (target) at an element offset.
- void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
- TNode<IntPtrT> byte_length);
-
- void CallCCopyFastNumberJSArrayElementsToTypedArray(
- TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
-
- void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
- TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length,
- TNode<IntPtrT> offset);
-};
-
TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
TNode<JSTypedArray> array) {
- Label unreachable(this), done(this);
- Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
- uint16_elements(this), int16_elements(this), uint32_elements(this),
- int32_elements(this), float32_elements(this), float64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_clamped_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const size_t kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
-
TVARIABLE(Map, var_typed_map);
-
TNode<Map> array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- for (int i = 0; i < static_cast<int>(kTypedElementsKindCount); i++) {
- BIND(elements_kind_labels[i]);
- {
- ElementsKind kind = static_cast<ElementsKind>(elements_kinds[i]);
- ExternalArrayType type =
- isolate()->factory()->GetArrayTypeFromElementsKind(kind);
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- var_typed_map = HeapConstant(map);
- Goto(&done);
- }
- }
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ ExternalArrayType type =
+ isolate()->factory()->GetArrayTypeFromElementsKind(kind);
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
+ var_typed_map = HeapConstant(map);
+ });
- BIND(&unreachable);
- { Unreachable(); }
- BIND(&done);
- return var_typed_map;
+ return var_typed_map.value();
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -218,7 +138,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Label setup_holder(this), allocate_on_heap(this), aligned(this),
allocate_elements(this), allocate_off_heap(this),
allocate_off_heap_no_init(this), attach_buffer(this), done(this);
- VARIABLE(var_total_size, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_total_size);
// SmiMul returns a heap number in case of Smi overflow.
TNode<Number> byte_length = SmiMul(length, element_size);
@@ -227,10 +147,12 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
TNode<Map> fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
- GotoIf(
- SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
- &allocate_off_heap);
- TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
+ // The goto above ensures that byte_length is a Smi.
+ TNode<Smi> smi_byte_length = CAST(byte_length);
+ GotoIf(SmiGreaterThan(smi_byte_length,
+ SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
+ &allocate_off_heap);
+ TNode<IntPtrT> word_byte_length = SmiToIntPtr(smi_byte_length);
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -281,17 +203,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// Fix alignment if needed.
DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
- Node* aligned_header_size =
+ TNode<IntPtrT> aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
- var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
+ TNode<IntPtrT> size = IntPtrAdd(word_byte_length, aligned_header_size);
+ var_total_size = WordAnd(size, IntPtrConstant(~kObjectAlignmentMask));
Goto(&allocate_elements);
}
BIND(&aligned);
{
- Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
+ TNode<IntPtrT> header_size =
+ IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
+ var_total_size = IntPtrAdd(word_byte_length, header_size);
Goto(&allocate_elements);
}
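
Both allocation-size branches above compute header-plus-payload with mask arithmetic: the unaligned case adds kObjectAlignmentMask before masking so the total rounds up to the next object-alignment boundary. A standalone sketch of that rounding, using plain integers in place of CSA nodes and illustrative constants (8-byte alignment, 16-byte header) rather than the real V8 values:

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-ins; the real constants are platform-dependent.
    constexpr intptr_t kObjectAlignmentMask = 7;  // 8-byte alignment
    constexpr intptr_t kHeaderSize = 16;          // FixedTypedArrayBase header

    // Mirrors the unaligned branch: add the header plus the mask, then clear
    // the low bits so the total is a multiple of the alignment.
    intptr_t TotalAllocationSize(intptr_t byte_length) {
      intptr_t size = byte_length + kHeaderSize + kObjectAlignmentMask;
      return size & ~kObjectAlignmentMask;
    }

    int main() {
      assert(TotalAllocationSize(0) == 16);  // already aligned: no padding
      assert(TotalAllocationSize(1) == 24);  // 17 rounds up to 24
      assert(TotalAllocationSize(8) == 24);  // 24 is a multiple of 8
      return 0;
    }

The aligned branch skips the rounding because the byte length is already a multiple of the alignment, so only the header is added.
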
@@ -359,7 +282,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&attach_buffer);
{
- AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
+ AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
+ byte_offset);
Goto(&done);
}
@@ -368,49 +292,44 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
}
// ES6 #sec-typedarray-length
-TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
- TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
+void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<Object> length,
+ TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Label invalid_length(this);
+ Label invalid_length(this, Label::kDeferred), done(this);
- TNode<Number> length = ToInteger_Inline(
- context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Number> converted_length =
+ ToInteger_Inline(context, length, CodeStubAssembler::kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
- // representation (which uses smi's).
- GotoIf(TaggedIsNotSmi(length), &invalid_length);
- GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
-
- CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, TrueConstant());
- Return(UndefinedConstant());
+ // representation (which uses Smis).
+ GotoIf(TaggedIsNotSmi(converted_length), &invalid_length);
+  // The goto above ensures that converted_length is a Smi.
+ TNode<Smi> smi_converted_length = CAST(converted_length);
+ GotoIf(SmiLessThan(smi_converted_length, SmiConstant(0)), &invalid_length);
+
+ Node* initialize = TrueConstant();
+ CallBuiltin(Builtins::kTypedArrayInitialize, context, holder,
+ converted_length, element_size, initialize);
+ Goto(&done);
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ converted_length);
}
+
+ BIND(&done);
}
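
ConstructByLength accepts an arbitrary value, truncates it with ToInteger (dropping minus zero), and then insists the result is a non-negative Smi before initializing. A scalar model of that validation, with an assumed kMaxLength standing in for the pointer-size-dependent Smi maximum:

    #include <cstdint>
    #include <optional>

    // Assumed stand-in for MaxSmi; the real bound depends on the platform.
    constexpr double kMaxLength = (int64_t{1} << 30) - 1;

    // Input is the already-truncated ToInteger result. Returns the length, or
    // std::nullopt where the builtin throws kInvalidTypedArrayLength.
    std::optional<int64_t> ValidateLength(double converted_length) {
      if (converted_length < 0 || converted_length > kMaxLength)
        return std::nullopt;
      return static_cast<int64_t>(converted_length);
    }

    // ValidateLength(10) yields 10; ValidateLength(-1) and
    // ValidateLength(1e10) yield std::nullopt (RangeError).
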
// ES6 #sec-typedarray-buffer-byteoffset-length
-TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* buffer = Parameter(Descriptor::kBuffer);
- TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer, TNode<Object> byte_offset,
+ TNode<Object> length, TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
VARIABLE(new_byte_length, MachineRepresentation::kTagged, SmiConstant(0));
@@ -421,7 +340,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
invalid_offset_error(this, Label::kDeferred);
Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this), detached_error(this);
+ length_undefined(this), length_defined(this), detached_error(this),
+ done(this);
GotoIf(IsUndefined(byte_offset), &check_length);
@@ -477,7 +397,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&length_defined);
{
- Node* new_length = ToSmiIndex(length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(length, context, &invalid_length);
GotoIf(IsDetachedBuffer(buffer), &detached_error);
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
@@ -495,22 +415,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&call_init);
{
- Node* new_length = CallBuiltin(Builtins::kDivide, context,
- new_byte_length.value(), element_size);
+ TNode<Object> raw_length = CallBuiltin(
+ Builtins::kDivide, context, new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
- new_length = ToSmiIndex(new_length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(raw_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
new_length, buffer, element_size, offset.value());
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_offset_error);
- {
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidOffset), byte_offset);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidOffset, byte_offset); }
BIND(&start_offset_error);
{
@@ -534,24 +450,84 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength, length);
}
BIND(&detached_error);
{ ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
+
+ BIND(&done);
+}
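
Stripped of the Smi/HeapNumber handling, the checks above are plain range arithmetic on the buffer: the offset must be a non-negative, element-aligned index, and a supplied length must describe a view that ends inside the buffer. A condensed model (all values assumed to fit in int64_t; the mapping to the individual error messages is simplified):

    #include <cstdint>

    bool ViewFitsBuffer(int64_t buffer_byte_length, int64_t byte_offset,
                        int64_t view_length, int64_t element_size) {
      // Offset must be a valid, element-aligned index into the buffer.
      if (byte_offset < 0 || byte_offset % element_size != 0) return false;
      if (view_length < 0) return false;
      // The view's last byte must not extend past the end of the buffer.
      int64_t view_byte_length = view_length * element_size;
      return byte_offset + view_byte_length <= buffer_byte_length;
    }

    // ViewFitsBuffer(16, 4, 3, 4) is true; ViewFitsBuffer(16, 2, 3, 4) is
    // false because offset 2 is not aligned to the 4-byte element size.
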
+
+void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array, TNode<Smi> element_size) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
+
+ TNode<JSFunction> const default_constructor = CAST(LoadContextElement(
+ LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
+
+ Label construct(this), if_detached(this), if_notdetached(this),
+ check_for_sab(this), if_buffernotshared(this), check_prototype(this),
+ done(this);
+ TVARIABLE(JSReceiver, buffer_constructor, default_constructor);
+
+ TNode<JSArrayBuffer> source_buffer = LoadObjectField<JSArrayBuffer>(
+ typed_array, JSArrayBufferView::kBufferOffset);
+ Branch(IsDetachedBuffer(source_buffer), &if_detached, &if_notdetached);
+
+ // TODO(petermarshall): Throw on detached typedArray.
+ TVARIABLE(Smi, source_length);
+ BIND(&if_detached);
+ source_length = SmiConstant(0);
+ Goto(&check_for_sab);
+
+ BIND(&if_notdetached);
+ source_length =
+ CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
+ Goto(&check_for_sab);
+
+ // The spec requires that constructing a typed array using a SAB-backed typed
+ // array use the ArrayBuffer constructor, not the species constructor. See
+ // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
+ BIND(&check_for_sab);
+ TNode<Uint32T> bitfield =
+ LoadObjectField<Uint32T>(source_buffer, JSArrayBuffer::kBitFieldOffset);
+ Branch(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &construct,
+ &if_buffernotshared);
+
+ BIND(&if_buffernotshared);
+ {
+ buffer_constructor =
+ CAST(SpeciesConstructor(context, source_buffer, default_constructor));
+ // TODO(petermarshall): Throw on detached typedArray.
+ GotoIfNot(IsDetachedBuffer(source_buffer), &construct);
+ source_length = SmiConstant(0);
+ Goto(&construct);
+ }
+
+ BIND(&construct);
+ {
+ ConstructByArrayLike(context, holder, typed_array, source_length.value(),
+ element_size);
+ TNode<Object> proto = GetProperty(context, buffer_constructor.value(),
+ PrototypeStringConstant());
+ // TODO(petermarshall): Correct for realm as per 9.1.14 step 4.
+ TNode<JSArrayBuffer> buffer = LoadObjectField<JSArrayBuffer>(
+ holder, JSArrayBufferView::kBufferOffset);
+ CallRuntime(Runtime::kInternalSetPrototype, context, buffer, proto);
+
+ Goto(&done);
+ }
+
+ BIND(&done);
}
Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
CSA_ASSERT(this, IsJSTypedArray(typed_array));
Node* elements = LoadElements(typed_array);
CSA_ASSERT(this, IsFixedTypedArray(elements));
- Node* base_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset));
- Node* external_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset));
- return IntPtrAdd(base_pointer, external_pointer);
+ return LoadFixedTypedArrayBackingStore(CAST(elements));
}
TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
@@ -574,28 +550,24 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
Goto(&done);
BIND(&done);
- return is_valid;
+ return is_valid.value();
}
-TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* array_like = Parameter(Descriptor::kArrayLike);
- Node* initial_length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- Node* context = Parameter(Descriptor::kContext);
-
+void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like, TNode<Object> initial_length,
+ TNode<Smi> element_size) {
Node* initialize = FalseConstant();
- Label invalid_length(this), fill(this), fast_copy(this);
+ Label invalid_length(this), fill(this), fast_copy(this), done(this);
// The caller has looked up length on array_like, which is observable.
- Node* length = ToSmiLength(initial_length, context, &invalid_length);
+ TNode<Smi> length = ToSmiLength(initial_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
element_size, initialize);
GotoIf(SmiNotEqual(length, SmiConstant(0)), &fill);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fill);
TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
@@ -605,7 +577,7 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
// Copy using the elements accessor.
CallRuntime(Runtime::kTypedArrayCopyElements, context, holder, array_like,
length);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fast_copy);
{
@@ -632,16 +604,117 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memcpy,
holder_data_ptr, source_data_ptr, byte_length_intptr);
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength),
- initial_length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ initial_length);
+ }
+
+ BIND(&done);
+}
+
+void TypedArrayBuiltinsAssembler::ConstructByIterable(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable, TNode<Object> iterator_fn,
+ TNode<Smi> element_size) {
+ CSA_ASSERT(this, IsCallable(iterator_fn));
+ Label fast_path(this), slow_path(this), done(this);
+
+ TNode<JSArray> array_like = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn));
+ TNode<Object> initial_length = LoadJSArrayLength(array_like);
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+}
+
+TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // If NewTarget is undefined, throw a TypeError exception.
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* shared = LoadObjectField(target, JSFunction::kSharedFunctionInfoOffset);
+ Node* name = LoadObjectField(shared, SharedFunctionInfo::kNameOffset);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+}
+
+TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
+ Label if_arg1isbuffer(this), if_arg1istypedarray(this),
+ if_arg1isreceiver(this), if_arg1isnumber(this), done(this);
+
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ CSA_ASSERT(this, IsNotUndefined(new_target));
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
+ TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
+ TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ TNode<JSTypedArray> holder = CAST(
+ constructor_assembler.EmitFastNewObject(context, target, new_target));
+
+ TNode<Smi> element_size =
+ SmiTag(GetTypedArrayElementSize(LoadElementsKind(holder)));
+
+ GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
+ GotoIf(IsJSArrayBuffer(arg1), &if_arg1isbuffer);
+ GotoIf(IsJSTypedArray(arg1), &if_arg1istypedarray);
+ GotoIf(IsJSReceiver(arg1), &if_arg1isreceiver);
+ Goto(&if_arg1isnumber);
+
+ BIND(&if_arg1isbuffer);
+ ConstructByArrayBuffer(context, holder, CAST(arg1), arg2, arg3, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1istypedarray);
+ TNode<JSTypedArray> typed_array = CAST(arg1);
+ ConstructByTypedArray(context, holder, typed_array, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1isreceiver);
+ {
+ Label if_iteratorundefined(this), if_iteratornotcallable(this);
+    // Look up the @@iterator method.
+ TNode<Object> iteratorFn =
+ CAST(GetMethod(context, arg1, isolate()->factory()->iterator_symbol(),
+ &if_iteratorundefined));
+ GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
+ GotoIfNot(IsCallable(iteratorFn), &if_iteratornotcallable);
+
+ ConstructByIterable(context, holder, CAST(arg1), iteratorFn, element_size);
+ Goto(&done);
+
+ BIND(&if_iteratorundefined);
+ {
+ TNode<HeapObject> array_like = CAST(arg1);
+ TNode<Object> initial_length =
+ GetProperty(context, arg1, LengthStringConstant());
+
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+ Goto(&done);
+ }
+
+ BIND(&if_iteratornotcallable);
+ { ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
}
+
+ // First arg was a number or fell through and will be treated as a number.
+ BIND(&if_arg1isnumber);
+ ConstructByLength(context, holder, arg1, element_size);
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(holder);
}
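
The construct stub's GotoIf chain is a type dispatch on the first argument: buffers, typed arrays, and other receivers each get their own construction path, and everything else (Smis included) falls through to the length path. The same shape as an ordinary C++ dispatch, with variant tags standing in for the CSA type checks:

    #include <variant>

    struct ArrayBufferArg {};
    struct TypedArrayArg {};
    struct ReceiverArg {};  // any other JSReceiver: iterable or array-like
    using Arg1 = std::variant<double, ArrayBufferArg, TypedArrayArg, ReceiverArg>;

    enum class ConstructPath {
      kByLength, kByArrayBuffer, kByTypedArray, kByIterableOrArrayLike
    };

    // Numbers and anything that is neither a buffer, a typed array, nor a
    // receiver take the length path, exactly like the fall-through above.
    ConstructPath Dispatch(const Arg1& arg1) {
      if (std::holds_alternative<ArrayBufferArg>(arg1))
        return ConstructPath::kByArrayBuffer;
      if (std::holds_alternative<TypedArrayArg>(arg1))
        return ConstructPath::kByTypedArray;
      if (std::holds_alternative<ReceiverArg>(arg1))
        return ConstructPath::kByIterableOrArrayLike;
      return ConstructPath::kByLength;
    }
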
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
@@ -668,9 +741,8 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
BIND(&receiver_is_incompatible);
{
// The {receiver} is not a valid JSTypedArray.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
}
@@ -707,57 +779,156 @@ TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
}
+TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+ TNode<Word32T> kind) {
+ return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
+}
+
TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
- TNode<Object> typed_array) {
- CSA_ASSERT(this, IsJSTypedArray(typed_array));
- return LoadMapElementsKind(LoadMap(CAST(typed_array)));
+ TNode<JSTypedArray> typed_array) {
+ return LoadMapElementsKind(LoadMap(typed_array));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TNode<Word32T> elements_kind) {
TVARIABLE(IntPtrT, element_size);
- Label next(this), if_unknown_type(this, Label::kDeferred);
- size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_fun_index) {
+ element_size = IntPtrConstant(size);
+ });
- int32_t elements_kinds[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ return element_size.value();
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label if_##type##array(this);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(IntPtrT, context_slot);
+ TNode<Word32T> elements_kind = LoadElementsKind(exemplar);
- Label* elements_kind_labels[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_function_index) {
+ context_slot = IntPtrConstant(typed_array_function_index);
+ });
- Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
+ return LoadContextElement(LoadNativeContext(context), context_slot.value());
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- BIND(&if_##type##array); \
- { \
- element_size = IntPtrConstant(size); \
- Goto(&next); \
- }
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(Object, var_constructor);
+ Label slow(this), done(this);
- BIND(&if_unknown_type);
+ // Let defaultConstructor be the intrinsic object listed in column one of
+ // Table 52 for exemplar.[[TypedArrayName]].
+ TNode<Object> default_constructor = GetDefaultConstructor(context, exemplar);
+
+ var_constructor = default_constructor;
+ Node* map = LoadMap(exemplar);
+ GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
+ Branch(IsSpeciesProtectorCellInvalid(), &slow, &done);
+
+ BIND(&slow);
+ var_constructor =
+ CAST(SpeciesConstructor(context, exemplar, default_constructor));
+ Goto(&done);
+
+ BIND(&done);
+ return var_constructor.value();
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer, TNode<Number> byte_offset, TNode<Smi> len,
+ const char* method_name) {
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object =
+ CAST(ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ buffer, byte_offset, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ return ValidateTypedArray(context, new_object, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByLength(
+ TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
+ const char* method_name) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(len));
+
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+ CSA_ASSERT(this, IsJSFunction(constructor));
+
+ return CreateByLength(context, constructor, len, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
+ TNode<Context> context, TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name) {
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object = CAST(ConstructJS(CodeFactory::Construct(isolate()),
+ context, constructor, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ TNode<JSTypedArray> new_typed_array =
+ ValidateTypedArray(context, new_object, method_name);
+
+ // If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
+ // exception.
+ Label if_length_is_not_short(this);
+ TNode<Smi> new_length =
+ LoadObjectField<Smi>(new_typed_array, JSTypedArray::kLengthOffset);
+ GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
+ ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
+
+ BIND(&if_length_is_not_short);
+ return new_typed_array;
+}
+
+TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
+ TNode<Context> context, TNode<JSTypedArray> array) {
+ Label call_runtime(this), done(this);
+ TVARIABLE(Object, var_result);
+
+ TNode<Object> buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &call_runtime);
+ TNode<UintPtrT> backing_store = LoadObjectField<UintPtrT>(
+ CAST(buffer), JSArrayBuffer::kBackingStoreOffset);
+ GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
+ var_result = buffer;
+ Goto(&done);
+
+ BIND(&call_runtime);
{
- element_size = IntPtrConstant(0);
- Goto(&next);
+ var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array);
+ Goto(&done);
}
- BIND(&next);
- return element_size;
+
+ BIND(&done);
+ return CAST(var_result.value());
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
+ TNode<Context> context, TNode<Object> obj, const char* method_name) {
+ Label validation_done(this);
+
+  // If obj is not a typed array, throw a TypeError.
+ ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
+
+  // If the typed array's buffer is detached, throw a TypeError.
+ TNode<Object> buffer =
+ LoadObjectField(CAST(obj), JSTypedArray::kBufferOffset);
+ GotoIfNot(IsDetachedBuffer(buffer), &validation_done);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&validation_done);
+ return CAST(obj);
}
void TypedArrayBuiltinsAssembler::SetTypedArraySource(
@@ -801,7 +972,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
CSA_ASSERT(this,
UintPtrGreaterThanOrEqual(source_byte_length, IntPtrConstant(0)));
- Label call_memmove(this), fast_c_call(this), out(this);
+ Label call_memmove(this), fast_c_call(this), out(this), exception(this);
  // A fast memmove call can be used when the source and target types are
// the same or either Uint8 or Uint8Clamped.
@@ -823,6 +994,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
this, UintPtrGreaterThanOrEqual(
IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &exception);
+
TNode<IntPtrT> source_length =
LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
@@ -830,6 +1005,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
Goto(&out);
}
+ BIND(&exception);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+
BIND(&out);
}
@@ -871,6 +1049,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
}
BIND(&fast_c_call);
+ GotoIf(IsBigInt64ElementsKind(LoadElementsKind(target)), call_runtime);
CallCCopyFastNumberJSArrayElementsToTypedArray(context, source, target,
source_length, offset);
Goto(&out);
@@ -893,6 +1072,7 @@ void TypedArrayBuiltinsAssembler::
TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length,
TNode<IntPtrT> offset) {
+ CSA_ASSERT(this, Word32Not(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
isolate()));
@@ -913,6 +1093,56 @@ void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
offset);
}
+void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
+ TNode<JSTypedArray> source, TNode<JSTypedArray> dest, TNode<IntPtrT> start,
+ TNode<IntPtrT> end) {
+ TNode<ExternalReference> f = ExternalConstant(
+ ExternalReference::copy_typed_array_elements_slice(isolate()));
+ CallCFunction4(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::UintPtr(),
+ MachineType::UintPtr(), f, source, dest, start, end);
+}
+
+void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function) {
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ arraysize(elements_kinds));
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, size, Context::TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+ BIND(&next);
+}
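
DispatchTypedArrayByElementsKind centralizes the switch that LoadMapForType and GetTypedArrayElementSize previously each spelled out by hand: one label per typed-array elements kind, with the callback receiving the matching (kind, element size, constructor context slot) triple. A plain C++ analogue of the shape, using a made-up three-entry table where the real code expands the TYPED_ARRAYS macro:

    #include <cstdio>
    #include <cstdlib>
    #include <functional>

    enum ElementsKind { UINT8_ELEMENTS, INT32_ELEMENTS, FLOAT64_ELEMENTS };

    struct KindInfo { ElementsKind kind; int size; int fun_index; };

    // Made-up table; the real entries come from the TYPED_ARRAYS macro.
    constexpr KindInfo kKinds[] = {
        {UINT8_ELEMENTS, 1, 0}, {INT32_ELEMENTS, 4, 1}, {FLOAT64_ELEMENTS, 8, 2}};

    void DispatchByElementsKind(
        ElementsKind kind, const std::function<void(ElementsKind, int, int)>& f) {
      for (const KindInfo& info : kKinds) {
        if (info.kind == kind) {
          f(info.kind, info.size, info.fun_index);
          return;
        }
      }
      std::abort();  // unknown kind: the builtin marks this Unreachable()
    }

    int main() {
      DispatchByElementsKind(INT32_ELEMENTS, [](ElementsKind, int size, int slot) {
        std::printf("element size %d, context slot %d\n", size, slot);
      });
      return 0;
    }
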
+
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
@@ -998,6 +1228,193 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kNotTypedArray);
}
+// ES %TypedArray%.prototype.slice
+TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.slice";
+ Label call_c(this), call_memmove(this), if_count_is_not_zero(this),
+ if_typed_array_is_neutered(this, Label::kDeferred),
+ if_bigint_mixed_types(this, Label::kDeferred);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // Convert start offset argument to integer, and calculate relative offset.
+ TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ TNode<Smi> start_index =
+ SmiTag(ConvertToRelativeIndex(context, start, SmiUntag(source_length)));
+
+ // Convert end offset argument to integer, and calculate relative offset.
+  // If the end offset is not given or is undefined, use source_length as the
+  // end index.
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ TNode<Smi> end_index =
+ Select<Smi>(IsUndefined(end), [=] { return source_length; },
+ [=] {
+ return SmiTag(ConvertToRelativeIndex(
+ context, end, SmiUntag(source_length)));
+ },
+ MachineRepresentation::kTagged);
+
+ // Create a result array by invoking TypedArraySpeciesCreate.
+ TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, count, method_name);
+
+ // If count is zero, return early.
+ GotoIf(SmiGreaterThan(count, SmiConstant(0)), &if_count_is_not_zero);
+ args.PopAndReturn(result_array);
+
+ BIND(&if_count_is_not_zero);
+  // Check whether the source array has been neutered. We don't need to check
+  // the result array, since TypedArraySpeciesCreate already checked it.
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
+ result_array, JSTypedArray::kBufferOffset))));
+ TNode<Object> receiver_buffer =
+ LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+
+  // result_array could have a different type from source, or share the same
+  // buffer with the source, because of a custom species constructor.
+  // If source and the result array have the same type and do not share a
+  // buffer, use memmove.
+ TNode<Word32T> source_el_kind = LoadElementsKind(source);
+ TNode<Word32T> target_el_kind = LoadElementsKind(result_array);
+ GotoIfNot(Word32Equal(source_el_kind, target_el_kind), &call_c);
+
+ TNode<Object> target_buffer =
+ LoadObjectField(result_array, JSTypedArray::kBufferOffset);
+ Branch(WordEqual(receiver_buffer, target_buffer), &call_c, &call_memmove);
+
+ BIND(&call_memmove);
+ {
+ GotoIfForceSlowPath(&call_c);
+
+ TNode<IntPtrT> target_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(result_array));
+ TNode<IntPtrT> source_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+
+ TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
+ TNode<IntPtrT> source_start_bytes =
+ IntPtrMul(SmiToIntPtr(start_index), source_el_size);
+ TNode<IntPtrT> source_start =
+ IntPtrAdd(source_data_ptr, source_start_bytes);
+
+ TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
+
+#ifdef DEBUG
+ TNode<IntPtrT> target_byte_length =
+ LoadAndUntagObjectField(result_array, JSTypedArray::kByteLengthOffset);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length));
+
+ TNode<IntPtrT> source_byte_length =
+ LoadAndUntagObjectField(source, JSTypedArray::kByteLengthOffset);
+ TNode<IntPtrT> source_size_in_bytes =
+ IntPtrSub(source_byte_length, source_start_bytes);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes));
+#endif // DEBUG
+
+ CallCMemmove(target_data_ptr, source_start, count_bytes);
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&call_c);
+ {
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &if_bigint_mixed_types);
+
+ CallCCopyTypedArrayElementsSlice(
+ source, result_array, SmiToIntPtr(start_index), SmiToIntPtr(end_index));
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&if_typed_array_is_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&if_bigint_mixed_types);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+}
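
ConvertToRelativeIndex, used above for both the start and end arguments, implements the familiar slice rule: a negative index counts back from the end, and the result is clamped into [0, length]. Freestanding:

    #include <algorithm>
    #include <cstdint>

    // relative < 0: max(length + relative, 0); otherwise min(relative, length).
    int64_t RelativeIndex(int64_t relative, int64_t length) {
      return relative < 0 ? std::max<int64_t>(length + relative, 0)
                          : std::min<int64_t>(relative, length);
    }

    // RelativeIndex(-2, 10) == 8; RelativeIndex(42, 10) == 10; the count
    // copied by slice is then max(end_index - start_index, 0).
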
+
+// ES %TypedArray%.prototype.subarray
+TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.subarray";
+ Label offset_done(this);
+
+ TVARIABLE(Smi, var_begin);
+ TVARIABLE(Smi, var_end);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ // 1. Let O be the this value.
+ // 3. If O does not have a [[TypedArrayName]] internal slot, throw a TypeError
+ // exception.
+ TNode<Object> receiver = args.GetReceiver();
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
+
+ TNode<JSTypedArray> source = CAST(receiver);
+
+ // 5. Let buffer be O.[[ViewedArrayBuffer]].
+ TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
+ // 6. Let srcLength be O.[[ArrayLength]].
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 7. Let relativeBegin be ? ToInteger(begin).
+ // 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
+ // 0); else let beginIndex be min(relativeBegin, srcLength).
+ TNode<Object> begin = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ var_begin =
+ SmiTag(ConvertToRelativeIndex(context, begin, SmiUntag(source_length)));
+
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ // 9. If end is undefined, let relativeEnd be srcLength;
+ var_end = source_length;
+ GotoIf(IsUndefined(end), &offset_done);
+
+ // else, let relativeEnd be ? ToInteger(end).
+ // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd), 0);
+ // else let endIndex be min(relativeEnd, srcLength).
+ var_end =
+ SmiTag(ConvertToRelativeIndex(context, end, SmiUntag(source_length)));
+ Goto(&offset_done);
+
+ BIND(&offset_done);
+
+ // 11. Let newLength be max(endIndex - beginIndex, 0).
+ TNode<Smi> new_length =
+ SmiMax(SmiSub(var_end.value(), var_begin.value()), SmiConstant(0));
+
+ // 12. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 13. Let elementSize be the Number value of the Element Size value specified
+ // in Table 52 for constructorName.
+ TNode<Word32T> element_kind = LoadElementsKind(source);
+ TNode<IntPtrT> element_size = GetTypedArrayElementSize(element_kind);
+
+ // 14. Let srcByteOffset be O.[[ByteOffset]].
+ TNode<Number> source_byte_offset =
+ LoadObjectField<Number>(source, JSTypedArray::kByteOffsetOffset);
+
+ // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
+ TNode<Number> offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size));
+ TNode<Number> begin_byte_offset = NumberAdd(source_byte_offset, offset);
+
+ // 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
+ // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ args.PopAndReturn(SpeciesCreateByArrayBuffer(
+ context, source, buffer, begin_byte_offset, new_length, method_name));
+}
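
After begin and end are clamped, steps 11-15 are simple arithmetic: the new view covers max(end - begin, 0) elements and starts at the source's byte offset plus begin scaled by the element size. A direct transcription (int64_t in place of the builtin's Smi/Number mix):

    #include <algorithm>
    #include <cstdint>

    struct SubarraySpan {
      int64_t byte_offset;
      int64_t length;  // in elements
    };

    SubarraySpan ComputeSubarray(int64_t src_byte_offset, int64_t element_size,
                                 int64_t begin_index, int64_t end_index) {
      SubarraySpan span;
      span.length = std::max<int64_t>(end_index - begin_index, 0);      // step 11
      span.byte_offset = src_byte_offset + begin_index * element_size;  // step 15
      return span;
    }
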
+
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1045,7 +1462,6 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
IterationKind iteration_kind) {
Label throw_bad_receiver(this, Label::kDeferred);
- Label throw_typeerror(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
@@ -1063,22 +1479,11 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Return(CreateArrayIterator(receiver, map, instance_type, context,
iteration_kind));
- VARIABLE(var_message, MachineRepresentation::kTagged);
BIND(&throw_bad_receiver);
- var_message.Bind(SmiConstant(MessageTemplate::kNotTypedArray));
- Goto(&throw_typeerror);
+ ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
BIND(&if_receiverisneutered);
- var_message.Bind(SmiConstant(MessageTemplate::kDetachedOperation));
- Goto(&throw_typeerror);
-
- BIND(&throw_typeerror);
- {
- Node* method_arg = StringConstant(method_name);
- Node* result = CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), method_arg);
- Return(result);
- }
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
// ES6 #sec-%typedarray%.prototype.values
@@ -1107,6 +1512,427 @@ TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys);
}
+// ES6 #sec-%typedarray%.of
+TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // 1. Let len be the actual number of arguments passed to this function.
+ TNode<IntPtrT> length = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ // 2. Let items be the List of arguments passed to this function.
+ CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS,
+ CodeStubArguments::ReceiverMode::kHasReceiver);
+
+ Label if_not_constructor(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ // 3. Let C be the this value.
+ // 4. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 5. Let newObj be ? TypedArrayCreate(C, len).
+ TNode<JSTypedArray> new_typed_array =
+ CreateByLength(context, receiver, SmiTag(length), "%TypedArray%.of");
+
+ TNode<Word32T> elements_kind = LoadElementsKind(new_typed_array);
+
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ // a. Let kValue be items[k].
+ // b. Let Pk be ! ToString(k).
+ // c. Perform ? Set(newObj, Pk, kValue, true).
+ // d. Increase k by 1.
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<FixedTypedArrayBase> elements =
+ CAST(LoadElements(new_typed_array));
+ BuildFastLoop(
+ IntPtrConstant(0), length,
+ [&](Node* index) {
+ TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
+ TNode<IntPtrT> intptr_index = UncheckedCast<IntPtrT>(index);
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(new_typed_array, elements,
+ intptr_index, item, context,
+ &if_neutered);
+ } else {
+ Node* value =
+ PrepareValueForWriteToTypedArray(item, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(new_typed_array,
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+                  // GC may move the backing store in ToNumber, so reload
+                  // the backing store on every iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, value,
+ INTPTR_PARAMETERS);
+ }
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ });
+
+ // 8. Return newObj.
+ args.PopAndReturn(new_typed_array);
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.of");
+}
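
The store loop above re-checks the buffer on every iteration because the value conversion can call back into JavaScript (via valueOf/toString), which may detach the buffer or let the GC move the backing store. The defensive shape of such a loop, with a callback modelling the conversion (the buffer's storage is assumed pre-sized to the item count):

    #include <cstddef>
    #include <functional>
    #include <vector>

    struct Buffer {
      bool detached = false;
      std::vector<double> store;
    };

    // to_number models PrepareValueForWriteToTypedArray: arbitrary user code
    // may run inside it and detach the buffer mid-loop.
    bool StoreAll(Buffer& buf, const std::vector<double>& items,
                  const std::function<double(Buffer&, double)>& to_number) {
      for (size_t i = 0; i < items.size(); ++i) {
        double value = to_number(buf, items[i]);
        if (buf.detached) return false;  // the builtin throws kDetachedOperation
        buf.store[i] = value;            // touch the store only after the check
      }
      return true;
    }
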
+
+TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+
+ Label fast_path(this), slow_path(this), done(this);
+
+ TVARIABLE(JSArray, created_list);
+
+  // Fast path: skip the iterator entirely when doing so is unobservable.
+ // TODO(petermarshall): Port to CSA.
+ Node* elided =
+ CallRuntime(Runtime::kIterableToListCanBeElided, context, iterable);
+ CSA_ASSERT(this, IsBoolean(elided));
+ Branch(IsTrue(elided), &fast_path, &slow_path);
+
+ BIND(&fast_path);
+ {
+ created_list = CAST(iterable);
+ Goto(&done);
+ }
+
+ BIND(&slow_path);
+ {
+ IteratorBuiltinsAssembler iterator_assembler(state());
+
+ // 1. Let iteratorRecord be ? GetIterator(items, method).
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, iterable, iterator_fn);
+
+ // 2. Let values be a new empty List.
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), loop_end(this);
+ Goto(&loop_start);
+ // 3. Let next be true.
+ // 4. Repeat, while next is not false
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<Object> next = CAST(
+ iterator_assembler.IteratorStep(context, iterator_record, &loop_end));
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value =
+ CAST(iterator_assembler.IteratorValue(context, next));
+ // ii. Append nextValue to the end of the List values.
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
+ BIND(&loop_end);
+
+ // 5. Return values.
+ TNode<JSArray> js_array_values = values.ToJSArray(context);
+ created_list = js_array_values;
+ Goto(&done);
+ }
+
+ BIND(&done);
+ Return(created_list.value());
+}
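
The slow path drains the iterator into a GrowableFixedArray, which absorbs the unknown element count by growing its capacity as elements are pushed and trimming on conversion to a JSArray. A toy model of that push-and-grow behavior (the growth factor here is an assumption; the real policy lives in growable-fixed-array-gen.cc):

    #include <cstdint>
    #include <vector>

    class GrowableList {
     public:
      void Push(int64_t value) {
        if (length_ == capacity_) {
          capacity_ = capacity_ * 2 + 1;  // assumed growth policy
          storage_.resize(capacity_);
        }
        storage_[length_++] = value;
      }
      // Models GrowableFixedArray::ToJSArray: trim the backing store to length.
      std::vector<int64_t> ToArray() const {
        return {storage_.begin(), storage_.begin() + length_};
      }

     private:
      std::vector<int64_t> storage_;
      size_t capacity_ = 0;
      size_t length_ = 0;
    };
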
+
+// ES6 #sec-%typedarray%.from
+TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Label check_iterator(this), from_array_like(this), fast_path(this),
+ slow_path(this), create_typed_array(this),
+ if_not_constructor(this, Label::kDeferred),
+ if_map_fn_not_callable(this, Label::kDeferred),
+ if_iterator_fn_not_callable(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ TNode<Object> source = args.GetOptionalArgumentValue(0);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+
+ // 1. Let C be the this value.
+ // 2. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 3. If mapfn is present and mapfn is not undefined, then
+ TNode<Object> map_fn = args.GetOptionalArgumentValue(1);
+ TVARIABLE(BoolT, mapping, Int32FalseConstant());
+ GotoIf(IsUndefined(map_fn), &check_iterator);
+
+ // a. If IsCallable(mapfn) is false, throw a TypeError exception.
+ // b. Let mapping be true.
+ // 4. Else, let mapping be false.
+ GotoIf(TaggedIsSmi(map_fn), &if_map_fn_not_callable);
+ GotoIfNot(IsCallable(map_fn), &if_map_fn_not_callable);
+ mapping = Int32TrueConstant();
+ Goto(&check_iterator);
+
+ TVARIABLE(Object, final_source);
+ TVARIABLE(Smi, final_length);
+
+  // We split this builtin up differently from the way it is written in the
+  // spec. We already have great code in the elements accessor for copying
+  // from a JSArray into a TypedArray, so we use that when possible. We only
+  // avoid calling into the elements accessor when we have a mapping function,
+  // because it can't handle that. Here, the presence of a mapping function is
+  // the slow path. We also combine the two different loops in the
+  // specification (starting at 7.e and 13) because they are essentially
+  // identical, which also saves on code size.
+
+ BIND(&check_iterator);
+ {
+ // 6. Let usingIterator be ? GetMethod(source, @@iterator).
+ TNode<Object> iterator_fn =
+ CAST(GetMethod(context, source, isolate()->factory()->iterator_symbol(),
+ &from_array_like));
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+ GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
+
+ // We are using the iterator.
+ Label if_length_not_smi(this, Label::kDeferred);
+ // 7. If usingIterator is not undefined, then
+ // a. Let values be ? IterableToList(source, usingIterator).
+ // b. Let len be the number of elements in values.
+ TNode<JSArray> values = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, source, iterator_fn));
+
+ // This is not a spec'd limit, so it doesn't particularly matter when we
+ // throw the range error for typed array length > MaxSmi.
+ TNode<Object> raw_length = LoadJSArrayLength(values);
+ GotoIfNot(TaggedIsSmi(raw_length), &if_length_not_smi);
+
+ final_length = CAST(raw_length);
+ final_source = values;
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ BIND(&from_array_like);
+ {
+ Label if_length_not_smi(this, Label::kDeferred);
+ final_source = source;
+
+ // 10. Let len be ? ToLength(? Get(arrayLike, "length")).
+ TNode<Object> raw_length =
+ GetProperty(context, final_source.value(), LengthStringConstant());
+ final_length = ToSmiLength(raw_length, context, &if_length_not_smi);
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ TVARIABLE(JSTypedArray, target_obj);
+
+ BIND(&create_typed_array);
+ {
+ // 7c/11. Let targetObj be ? TypedArrayCreate(C, «len»).
+ target_obj = CreateByLength(context, receiver, final_length.value(),
+ "%TypedArray%.from");
+
+ Branch(mapping.value(), &slow_path, &fast_path);
+ }
+
+ BIND(&fast_path);
+ {
+ Label done(this);
+ GotoIf(SmiEqual(final_length.value(), SmiConstant(0)), &done);
+
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, target_obj.value(),
+ final_source.value(), final_length.value());
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(target_obj.value());
+ }
+
+ BIND(&slow_path);
+ TNode<Word32T> elements_kind = LoadElementsKind(target_obj.value());
+
+ // 7e/13 : Copy the elements
+ TNode<FixedTypedArrayBase> elements = CAST(LoadElements(target_obj.value()));
+ BuildFastLoop(
+ SmiConstant(0), final_length.value(),
+ [&](Node* index) {
+ TNode<Object> const k_value =
+ GetProperty(context, final_source.value(), index);
+
+ TNode<Object> const mapped_value =
+ CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
+ k_value, index));
+
+ TNode<IntPtrT> intptr_index = SmiUntag(index);
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(target_obj.value(), elements,
+ intptr_index, mapped_value,
+ context, &if_neutered);
+ } else {
+ Node* const final_value = PrepareValueForWriteToTypedArray(
+ mapped_value, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(target_obj.value(),
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+ // GC may move backing store in map_fn, thus load backing
+ // store in each iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, final_value,
+ SMI_PARAMETERS);
+ }
+ });
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ args.PopAndReturn(target_obj.value());
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_map_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_fn);
+
+ BIND(&if_iterator_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.from");
+}
+
+// ES %TypedArray%.prototype.filter
+TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.filter";
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ Label if_callback_not_callable(this, Label::kDeferred),
+ detached(this, Label::kDeferred);
+
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ // 3. Let len be O.[[ArrayLength]].
+ TNode<Smi> length = LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
+ GotoIf(TaggedIsSmi(callbackfn), &if_callback_not_callable);
+ GotoIfNot(IsCallable(callbackfn), &if_callback_not_callable);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
+
+ TNode<JSArrayBuffer> source_buffer =
+ LoadObjectField<JSArrayBuffer>(source, JSArrayBufferView::kBufferOffset);
+ TNode<Word32T> elements_kind = LoadElementsKind(source);
+ GrowableFixedArray values(state());
+ VariableList vars(
+ {values.var_array(), values.var_length(), values.var_capacity()}, zone());
+
+ // 6. Let kept be a new empty List.
+ // 7. Let k be 0.
+ // 8. Let captured be 0.
+ // 9. Repeat, while k < len
+ BuildFastLoop(
+ vars, SmiConstant(0), length,
+ [&](Node* index) {
+ GotoIf(IsDetachedBuffer(source_buffer), &detached);
+
+ TVARIABLE(Numeric, value);
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(O, Pk).
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<IntPtrT> backing_store =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+ value = CAST(LoadFixedTypedArrayElementAsTagged(
+ backing_store, index, kind, ParameterMode::SMI_PARAMETERS));
+ });
+
+ // c. Let selected be ToBoolean(Call(callbackfn, T, kValue, k, O))
+ Node* selected =
+ CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
+ value.value(), index, source);
+
+ Label true_continue(this), false_continue(this);
+ BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
+
+ BIND(&true_continue);
+ // d. If selected is true, then
+ // i. Append kValue to the end of kept.
+ // ii. Increase captured by 1.
+ values.Push(value.value());
+ Goto(&false_continue);
+
+ BIND(&false_continue);
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ TNode<JSArray> values_array = values.ToJSArray(context);
+ TNode<Smi> captured = LoadFastJSArrayLength(values_array);
+
+ // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, captured, method_name);
+
+ // 11. Let n be 0.
+ // 12. For each element e of kept, do
+ // a. Perform ! Set(A, ! ToString(n), e, true).
+ // b. Increment n by 1.
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, result_array,
+ values_array, captured);
+
+ // 13. Return A.
+ args.PopAndReturn(result_array);
+
+ BIND(&if_callback_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, callbackfn);
+
+ BIND(&detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+}
+
#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.h b/deps/v8/src/builtins/builtins-typedarray-gen.h
new file mode 100644
index 0000000000..37f923dea6
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.h
@@ -0,0 +1,133 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<JSTypedArray> SpeciesCreateByLength(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<Smi> len,
+ const char* method_name);
+
+ protected:
+ void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
+ const char* method_name,
+ int object_offset);
+ void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ const char* method_name,
+ IterationKind iteration_kind);
+
+ void ConstructByLength(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<Object> length, TNode<Smi> element_size);
+ void ConstructByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Object> byte_offset, TNode<Object> length,
+ TNode<Smi> element_size);
+ void ConstructByTypedArray(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array,
+ TNode<Smi> element_size);
+ void ConstructByArrayLike(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like,
+ TNode<Object> initial_length,
+ TNode<Smi> element_size);
+ void ConstructByIterable(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable,
+ TNode<Object> iterator_fn, TNode<Smi> element_size);
+
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ TNode<Number> byte_offset, TNode<Number> byte_length);
+ void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+ TNode<Map> map, TNode<Smi> length,
+ TNode<Number> byte_offset);
+
+ TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+ TNode<Number> byte_offset);
+ Node* LoadDataPtr(Node* typed_array);
+ TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
+
+ // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
+ TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+
+ // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
+ TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
+
+  // Loads the elements kind of a TypedArray instance.
+ TNode<Word32T> LoadElementsKind(TNode<JSTypedArray> typed_array);
+
+ // Returns the byte size of an element for a TypedArray elements kind.
+ TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
+
+ TNode<Object> GetDefaultConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<Object> TypedArraySpeciesConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<JSTypedArray> SpeciesCreateByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Number> byte_offset,
+ TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSTypedArray> CreateByLength(TNode<Context> context,
+ TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSArrayBuffer> GetBuffer(TNode<Context> context,
+ TNode<JSTypedArray> array);
+
+ TNode<JSTypedArray> ValidateTypedArray(TNode<Context> context,
+ TNode<Object> obj,
+ const char* method_name);
+
+ // Fast path for setting a TypedArray (source) onto another TypedArray
+ // (target) at an element offset.
+ void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
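+ // Fast path for setting a JSArray (source) onto a TypedArray (target) at an
+ // element offset.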
+ void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
+ TNode<IntPtrT> byte_length);
+
+ void CallCCopyFastNumberJSArrayElementsToTypedArray(
+ TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length,
+ TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsSlice(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> start,
+ TNode<IntPtrT> end);
+
+ typedef std::function<void(ElementsKind, int, int)> TypedArraySwitchCase;
+
+ void DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
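+
+ // Rough usage sketch (lambda parameter names are guesses; only the typedef
+ // above is authoritative). At code-generation time, case_function is invoked
+ // once per elements kind to emit a specialized path:
+ //   DispatchTypedArrayByElementsKind(
+ //       LoadElementsKind(array),
+ //       [&](ElementsKind kind, int size, int id) { /* specialized path */ });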
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 18625c8d90..6fcc279c66 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -42,16 +42,6 @@ int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
: std::min<int64_t>(relative, maximum);
}
-MaybeHandle<JSTypedArray> TypedArraySpeciesCreateByLength(
- Isolate* isolate, Handle<JSTypedArray> exemplar, const char* method_name,
- int64_t length) {
- const int argc = 1;
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = isolate->factory()->NewNumberFromInt64(length);
- return JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(),
- method_name);
-}
-
} // namespace
BUILTIN(TypedArrayPrototypeCopyWithin) {
@@ -124,10 +114,16 @@ BUILTIN(TypedArrayPrototypeFill) {
const char* method = "%TypedArray%.prototype.fill";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj_value, Object::ToNumber(obj_value));
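+  // BigInt64/BigUint64 arrays take a BigInt fill value (BigInt::FromObject,
+  // i.e. ToBigInt); all other elements kinds coerce the value with ToNumber.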
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ BigInt::FromObject(isolate, obj_value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ Object::ToNumber(obj_value));
+ }
int64_t len = array->length_value();
int64_t start = 0;
@@ -161,9 +157,9 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return array->GetElementsAccessor()->Fill(isolate, array, obj_value,
- static_cast<uint32_t>(start),
- static_cast<uint32_t>(end));
+ return ElementsAccessor::ForKind(kind)->Fill(isolate, array, obj_value,
+ static_cast<uint32_t>(start),
+ static_cast<uint32_t>(end));
}
BUILTIN(TypedArrayPrototypeIncludes) {
@@ -277,49 +273,5 @@ BUILTIN(TypedArrayPrototypeReverse) {
return *array;
}
-BUILTIN(TypedArrayPrototypeSlice) {
- HandleScope scope(isolate);
-
- Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.slice";
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
-
- int64_t len = array->length_value();
- int64_t start = 0;
- int64_t end = len;
- {
- Handle<Object> num = args.atOrUndefined(isolate, 1);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- start = CapRelativeIndex(num, 0, len);
-
- num = args.atOrUndefined(isolate, 2);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- end = CapRelativeIndex(num, 0, len);
- }
- }
- }
-
- int64_t count = std::max<int64_t>(end - start, 0);
-
- Handle<JSTypedArray> result_array;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_array,
- TypedArraySpeciesCreateByLength(isolate, array, method, count));
-
- // TODO(cwhan.tunz): should throw.
- if (V8_UNLIKELY(array->WasNeutered())) return *result_array;
-
- if (count == 0) return *result_array;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- return *accessor->Slice(array, static_cast<uint32_t>(start),
- static_cast<uint32_t>(end), result_array);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index dc175e50b7..ad1763a292 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -86,8 +86,9 @@ Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
void Builtins::TearDown() { initialized_ = false; }
void Builtins::IterateBuiltins(RootVisitor* v) {
- v->VisitRootPointers(Root::kBuiltins, &builtins_[0],
- &builtins_[0] + builtin_count);
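+ // Visit each builtin slot individually so that the visitor also receives
+ // the builtin's name alongside its root pointer.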
+ for (int i = 0; i < builtin_count; i++) {
+ v->VisitRootPointer(Root::kBuiltins, name(i), &builtins_[i]);
+ }
}
const char* Builtins::Lookup(byte* pc) {
@@ -170,30 +171,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN)
#undef CASE_OTHER
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEach:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kConsoleAssert:
- return Callable(code, BuiltinDescriptor(isolate));
default:
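+ // All remaining TFJ and CPP builtins use the generic JS calling convention,
+ // so a shared BuiltinDescriptor suffices and they no longer need to be
+ // enumerated individually.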
+ Builtins::Kind kind = Builtins::KindOf(name);
+ if (kind == TFJ || kind == CPP) {
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
UNREACHABLE();
}
CallInterfaceDescriptor descriptor(isolate, key);
@@ -213,6 +195,22 @@ Address Builtins::CppEntryOf(int index) {
}
// static
+bool Builtins::IsBuiltin(Code* code) {
+ return Builtins::IsBuiltinId(code->builtin_index());
+}
+
+// static
+bool Builtins::IsOffHeapBuiltin(Code* code) {
+#ifdef V8_EMBEDDED_BUILTINS
+ return FLAG_stress_off_heap_code &&
+ Builtins::IsBuiltinId(code->builtin_index()) &&
+ Builtins::IsOffHeapSafe(code->builtin_index());
+#else
+ return false;
+#endif
+}
+
+// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
// There are a couple of reasons that builtins can require eager-loading,
@@ -245,12 +243,16 @@ bool Builtins::IsLazy(int index) {
case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReducePreLoopEagerDeoptContinuation:
case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArrayReduceRightLoopEagerDeoptContinuation:
case kArrayReduceRightLoopLazyDeoptContinuation:
case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
@@ -261,9 +263,11 @@ bool Builtins::IsLazy(int index) {
case kInterpreterEnterBytecodeDispatch:
case kInterpreterEntryTrampoline:
case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kPromiseConstructorLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kTypedArrayConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kProxyConstructor: // https://crbug.com/v8/6787.
case kRecordWrite: // https://crbug.com/chromium/765301.
case kThrowWasmTrapDivByZero: // Required by wasm.
@@ -286,6 +290,1090 @@ bool Builtins::IsLazy(int index) {
}
// static
+bool Builtins::IsIsolateIndependent(int index) {
+ DCHECK(IsBuiltinId(index));
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsOffHeapSafe(int index) {
+#ifndef V8_EMBEDDED_BUILTINS
+ return false;
+#else
+ DCHECK(IsBuiltinId(index));
+ if (IsTooShortForOffHeapTrampoline(index)) return false;
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif // !DEBUG
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+#endif // V8_EMBEDDED_BUILTINS
+}
+
+// static
+bool Builtins::IsTooShortForOffHeapTrampoline(int index) {
+ switch (index) {
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
Builtins::Kind Builtins::KindOf(int index) {
DCHECK(IsBuiltinId(index));
return builtin_metadata[index].kind;
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index d9090dc67e..bf96469d19 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -109,11 +109,33 @@ class Builtins {
static bool IsCpp(int index);
static bool HasCppImplementation(int index);
+ // True iff the given code object is a builtin. Note that this does not
+ // necessarily mean that its kind is Code::BUILTIN.
+ static bool IsBuiltin(Code* code);
+
+ // True iff the given code object is a builtin with off-heap code.
+ static bool IsOffHeapBuiltin(Code* code);
+
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
// special cases such as CompileLazy and DeserializeLazy.
static bool IsLazy(int index);
+ // Helper methods used for testing isolate-independent builtins.
+ // TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
+ static bool IsIsolateIndependent(int index);
+
+ // This is the condition we currently use to determine whether a builtin is
+ // copied off-heap when --stress-off-heap-code is passed. Such builtins do not
+ // need to be isolate-independent, e.g. they can contain external references
+ // that point to one specific isolate. A further restriction is that there
+ // must be enough space for the trampoline.
+ static bool IsOffHeapSafe(int index);
+
+ // The off-heap trampoline is short but requires a certain minimal instruction
+ // size. This function states whether a given builtin is too short.
+ static bool IsTooShortForOffHeapTrampoline(int index);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
new file mode 100644
index 0000000000..a4117bd5a2
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/constants-table-builder.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
+ : isolate_(isolate), map_(isolate->heap()) {
+ // Ensure this is only called once per Isolate.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ // And that the initial value of the builtins constants table can be treated
+ // as a constant, which means that codegen will load it using the root
+ // register.
+ DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant(
+ Heap::kEmptyFixedArrayRootIndex));
+}
+
+uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
+#ifdef DEBUG
+ // Roots must not be inserted into the constants table as they are already
+ // accessible from the root list.
+ Heap::RootListIndex root_list_index;
+ DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
+
+ // Not yet finalized.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+#endif
+
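+ // Objects are deduplicated: adding the same object again returns the index
+ // assigned on first insertion, so each constant occupies one table slot.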
+ uint32_t* maybe_key = map_.Find(object);
+ if (maybe_key == nullptr) {
+ uint32_t index = map_.size();
+ map_.Set(object, index);
+ return index;
+ } else {
+ return *maybe_key;
+ }
+}
+
+void BuiltinsConstantsTableBuilder::Finalize() {
+ HandleScope handle_scope(isolate_);
+
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ DCHECK_LT(0, map_.size());
+ Handle<FixedArray> table =
+ isolate_->factory()->NewFixedArray(map_.size(), TENURED);
+
+ Builtins* builtins = isolate_->builtins();
+ ConstantsMap::IteratableScope it_scope(&map_);
+ for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
+ uint32_t index = *it.entry();
+ Object* value = it.key();
+ if (value->IsCode() && Code::cast(value)->kind() == Code::BUILTIN) {
+ // Replace placeholder code objects with the real builtin.
+ // See also: SetupIsolateDelegate::PopulateWithPlaceholders.
+ // TODO(jgruber): Deduplicate placeholders and their corresponding
+ // builtin.
+ value = builtins->builtin(Code::cast(value)->builtin_index());
+ }
+ table->set(index, value);
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < map_.size(); i++) {
+ DCHECK(table->get(i)->IsHeapObject());
+ DCHECK_NE(isolate_->heap()->undefined_value(), table->get(i));
+ }
+#endif
+
+ isolate_->heap()->SetBuiltinsConstantsTable(*table);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
new file mode 100644
index 0000000000..d251d5849b
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+#define V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+#include "src/handles.h"
+#include "src/identity-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+
+// Utility class to build the builtins constants table and store it on the root
+// list. The constants table contains constants used by builtins, and is there
+// to avoid directly embedding them into code objects, which would not be
+// possible for off-heap (and thus immutable) code objects.
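+//
+// A rough usage sketch (the driver code here is hypothetical):
+//   BuiltinsConstantsTableBuilder builder(isolate);
+//   uint32_t index = builder.AddObject(object);  // while generating builtins
+//   builder.Finalize();  // once, after all affected code has been generated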
+class BuiltinsConstantsTableBuilder final {
+ public:
+ explicit BuiltinsConstantsTableBuilder(Isolate* isolate);
+
+ // Returns the index within the builtins constants table for the given
+ // object, possibly adding the object to the table. Objects are deduplicated.
+ uint32_t AddObject(Handle<Object> object);
+
+ // Should be called after all affected code (e.g. builtins and bytecode
+ // handlers) has been generated.
+ void Finalize();
+
+ private:
+ Isolate* isolate_;
+
+ // Maps objects to corresponding indices within the constants list.
+ typedef IdentityMap<uint32_t, FreeStoreAllocationPolicy> ConstantsMap;
+ ConstantsMap map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
new file mode 100644
index 0000000000..3a155e26f9
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/growable-fixed-array-gen.h"
+
+#include "src/compiler/code-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void GrowableFixedArray::Push(TNode<Object> const value) {
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ Label grow(this), store(this);
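+ // The backing store grows only when it is completely full, i.e. when
+ // length == capacity.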
+ Branch(IntPtrEqual(capacity, length), &grow, &store);
+
+ BIND(&grow);
+ {
+ var_capacity_ = NewCapacity(capacity);
+ var_array_ = ResizeFixedArray(length, var_capacity_.value());
+
+ Goto(&store);
+ }
+
+ BIND(&store);
+ {
+ TNode<FixedArray> const array = var_array_.value();
+ StoreFixedArrayElement(array, length, value);
+
+ var_length_ = IntPtrAdd(length, IntPtrConstant(1));
+ }
+}
+
+TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
+ const ElementsKind kind = PACKED_ELEMENTS;
+
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Map> const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ // Shrink to fit if necessary.
+ {
+ Label next(this);
+
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ GotoIf(WordEqual(length, capacity), &next);
+
+ var_array_ = ResizeFixedArray(length, length);
+ var_capacity_ = length;
+ Goto(&next);
+
+ BIND(&next);
+ }
+
+ TNode<Smi> const result_length = SmiTag(length());
+ TNode<JSArray> const result =
+ CAST(AllocateUninitializedJSArrayWithoutElements(array_map, result_length,
+ nullptr));
+
+ StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
+
+ return result;
+}
+
+TNode<IntPtrT> GrowableFixedArray::NewCapacity(
+ TNode<IntPtrT> current_capacity) {
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(current_capacity, IntPtrConstant(0)));
+
+ // The growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
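+ // For example, a current capacity of 16 grows to 16 + 8 + 16 = 40.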
+
+ TNode<IntPtrT> const new_capacity =
+ IntPtrAdd(IntPtrAdd(current_capacity, WordShr(current_capacity, 1)),
+ IntPtrConstant(16));
+
+ return new_capacity;
+}
+
+TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
+ TNode<IntPtrT> const element_count, TNode<IntPtrT> const new_capacity) {
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
+
+ TNode<FixedArray> const from_array = var_array_.value();
+
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
+ TNode<FixedArray> to_array = CAST(ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags));
+
+ return to_array;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
new file mode 100644
index 0000000000..f720659dee
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+#define V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+template <class T>
+using TNode = compiler::TNode<T>;
+
+// Utility class implementing a growable fixed array through CSA.
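+//
+// A rough usage sketch from inside another assembler (assuming the usual
+// CodeStubAssembler state() accessor is available):
+//   GrowableFixedArray elements(state());
+//   elements.Push(value);  // appends, growing the backing store as needed
+//   TNode<JSArray> result = elements.ToJSArray(context);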
+class GrowableFixedArray : public CodeStubAssembler {
+ public:
+ explicit GrowableFixedArray(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ var_array_(this),
+ var_length_(this),
+ var_capacity_(this) {
+ var_array_ = EmptyFixedArrayConstant();
+ var_capacity_ = IntPtrConstant(0);
+ var_length_ = IntPtrConstant(0);
+ }
+
+ TNode<IntPtrT> length() const { return var_length_.value(); }
+
+ TVariable<FixedArray>* var_array() { return &var_array_; }
+ TVariable<IntPtrT>* var_length() { return &var_length_; }
+ TVariable<IntPtrT>* var_capacity() { return &var_capacity_; }
+
+ void Push(TNode<Object> const value);
+
+ TNode<JSArray> ToJSArray(TNode<Context> const context);
+
+ private:
+ TNode<IntPtrT> NewCapacity(TNode<IntPtrT> current_capacity);
+
+ // Creates a new array with {new_capacity} and copies the first
+ // {element_count} elements from the current array.
+ TNode<FixedArray> ResizeFixedArray(TNode<IntPtrT> const element_count,
+ TNode<IntPtrT> const new_capacity);
+
+ TVariable<FixedArray> var_array_;
+ TVariable<IntPtrT> var_length_;
+ TVariable<IntPtrT> var_capacity_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 368e6670c1..3319dd0c51 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -101,7 +101,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ push(edi);
__ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
+ __ mov(ecx, eax);
// Restore target function and new target.
__ pop(edx);
@@ -110,15 +110,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(eax);
}
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
namespace {
@@ -224,7 +218,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -345,7 +339,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -590,6 +584,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -748,10 +743,12 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
- __ add(optimized_code_entry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ Move(ecx, optimized_code_entry);
+ __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(edx);
__ pop(eax);
- __ jmp(optimized_code_entry);
+ __ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -767,10 +764,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to
+// the if_return label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -780,11 +780,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +792,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +801,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
@@ -828,7 +837,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -931,9 +940,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -946,16 +956,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1262,9 +1269,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1280,8 +1288,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1289,6 +1299,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1306,7 +1320,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1319,7 +1333,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1348,7 +1366,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1531,6 +1549,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -2093,7 +2112,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2566,6 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -2581,6 +2601,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
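
The ia32 hunks above are the first per-architecture copy of this commit's interpreter change: AdvanceBytecodeOffset becomes AdvanceBytecodeOffsetOrReturn, which bails out to a caller-supplied label for every bytecode in RETURN_BYTECODE_LIST instead of comparing only against kReturn at each call site. A minimal C++ sketch of the control flow the assembly encodes, assuming a hypothetical kReturnBytecodes table and a zeroed placeholder size table in place of V8's real bytecode_size_table:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins: real V8 derives the return set from
// RETURN_BYTECODE_LIST and the sizes from bytecode_size_table_address().
enum class Bytecode : uint8_t { kWide = 0, kExtraWide = 1, kReturn = 0xA9 };
constexpr uint8_t kReturnBytecodes[] = {static_cast<uint8_t>(Bytecode::kReturn)};
static int kBytecodeSizes[3][256];  // [operand scale][bytecode], placeholder

// Returns true when the caller should take the if_return path; otherwise
// advances *offset past the current (possibly Wide/ExtraWide-prefixed)
// bytecode, mirroring the process_bytecode block in the assembly.
bool AdvanceBytecodeOffsetOrReturn(const uint8_t* bytecode_array,
                                   size_t* offset) {
  int scale = 0;
  uint8_t bytecode = bytecode_array[*offset];
  if (bytecode <= static_cast<uint8_t>(Bytecode::kExtraWide)) {
    // Skip the prefix and re-read the real bytecode at the scaled table.
    scale = (bytecode == static_cast<uint8_t>(Bytecode::kWide)) ? 1 : 2;
    bytecode = bytecode_array[++*offset];
  }
  for (uint8_t ret : kReturnBytecodes) {  // the JUMP_IF_EQUAL expansion
    if (bytecode == ret) return true;
  }
  *offset += kBytecodeSizes[scale][bytecode];  // advance past this bytecode
  return false;
}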
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index 978563cab5..cf2df277c9 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 7af02bb32e..e2d4421f86 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -154,12 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,7 +175,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -285,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -406,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -656,8 +652,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -807,7 +805,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Jump(optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -821,10 +821,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Lsa(scratch2, bytecode_size_table, bytecode, 2);
__ lw(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
@@ -886,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1003,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a0, MemOperand(a0));
- __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ lw(at, MemOperand(at));
- __ Call(at);
+ __ lbu(t3, MemOperand(a0));
+ __ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1007,18 +1018,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1219,10 +1226,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a1, MemOperand(a1));
- __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ lw(a1, MemOperand(a1));
- __ Jump(a1);
+ __ lbu(t3, MemOperand(a1));
+ __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1248,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1266,7 +1279,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1293,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1326,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1484,8 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -1984,7 +2003,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2510,8 +2529,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3 : new target (passed through to callee)
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Call(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2524,8 +2545,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
__ bind(&stack_overflow);
{
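
As in the ia32 and mips hunks above, every indirect call into JS code now goes through a single architecture-defined register, kJavaScriptCallCodeStartRegister (ecx on ia32, a2 on mips), with a static_assert documenting the ABI contract at each site. The jump target itself is the code object's instruction start; a sketch of that address arithmetic, with an illustrative constant standing in for V8's real Code::kHeaderSize:

#include <cstdint>

constexpr intptr_t kHeaderSize = 0x40;  // assumption: illustrative value only
constexpr intptr_t kHeapObjectTag = 1;  // tagged heap pointers carry tag bit 1

using JSEntry = void (*)();

// Models the repeated sequence: take the tagged Code pointer, strip the
// heap-object tag, and skip the Code header to reach the first instruction.
JSEntry CodeEntry(intptr_t tagged_code) {
  return reinterpret_cast<JSEntry>(tagged_code + kHeaderSize - kHeapObjectTag);
}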
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 266393070c..80ac1fadb1 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -154,13 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,8 +174,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
@@ -287,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -408,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -547,6 +541,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a2);
@@ -806,9 +801,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Daddu(optimized_code_entry, optimized_code_entry,
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -822,10 +819,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +834,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +846,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +856,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
__ Lw(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
@@ -886,7 +894,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1000,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a0, MemOperand(a0));
- __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ Ld(at, MemOperand(at));
- __ Call(at);
+ __ Lbu(a7, MemOperand(a0));
+ __ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1008,17 +1016,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1219,10 +1224,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a1, MemOperand(a1));
- __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ Ld(a1, MemOperand(a1));
- __ Jump(a1);
+ __ Lbu(a7, MemOperand(a1));
+ __ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1246,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1266,7 +1277,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1291,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1324,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1486,9 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ Ld(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
@@ -1624,6 +1640,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
@@ -1732,6 +1749,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
@@ -1786,6 +1804,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register undefined_value = a4;
Register scratch = a5;
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
@@ -2008,7 +2028,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2531,9 +2551,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3: new target (passed through to callee)
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2546,9 +2567,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
__ bind(&stack_overflow);
{
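
A second change threaded through every architecture here: the feedback vector is no longer stored directly on the closure. JSFunction::kFeedbackVectorOffset becomes JSFunction::kFeedbackCellOffset, and each load gains a second dereference through Cell::kValueOffset. A hedged C++ model of the new indirection follows; the field layouts are assumptions for illustration only, since real V8 uses tagged heap objects rather than raw pointers:

struct FeedbackVector { /* optimized code slot, ICs, ... */ };
struct FeedbackCell { FeedbackVector* value; };      // Cell::kValueOffset
struct JSFunction { FeedbackCell* feedback_cell; };  // kFeedbackCellOffset

// Before: one load, closure->feedback_vector.
// After:  two loads, closure->feedback_cell->value, so closures created
// from the same function literal can share a single cell.
FeedbackVector* LoadFeedbackVector(const JSFunction* closure) {
  return closure->feedback_cell->value;
}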
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 34da70ff0f..7ae635b0c1 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r3, r4, r6);
__ SmiUntag(r3);
}
- __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -293,7 +287,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
// If not derived class constructor: Allocate the new receiver object.
@@ -420,7 +414,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver, cr0);
} else {
@@ -563,9 +557,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ mr(r6, r4);
__ mr(r4, r7);
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
__ bind(&prepare_step_in_if_stepping);
@@ -827,10 +822,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addi(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, optimized_code_entry);
- __ Jump(optimized_code_entry);
+ __ mr(ip, r5);
+ __ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpi(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -879,7 +878,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpi(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -908,7 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1021,11 +1030,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r6));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1039,16 +1049,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmpi(r4, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1251,11 +1258,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1271,8 +1279,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
@@ -1280,6 +1290,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1297,7 +1311,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r5;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1311,7 +1325,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1340,7 +1358,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1524,9 +1542,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -2051,7 +2070,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2443,8 +2462,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r3, r5);
__ blt(&too_few);
__ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2460,7 +2477,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
@@ -2474,7 +2490,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// r6: new target (passed through to callee)
// r7: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2498,7 +2513,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -2507,7 +2521,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2521,7 +2534,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
@@ -2543,7 +2555,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : expected number of arguments
// r4 : function (passed through to callee)
// r6 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r5);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2556,7 +2571,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
__ bind(&stack_overflow);
{
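
The new if_return label also hardens Generate_InterpreterEnterBytecodeAdvance on every architecture: advancing the saved bytecode offset must never start from a return bytecode, so that path now aborts with AbortReason::kInvalidBytecodeAdvance instead of falling through. A sketch of the guard, reusing the AdvanceBytecodeOffsetOrReturn sketch above (names illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

bool AdvanceBytecodeOffsetOrReturn(const uint8_t* bytecode_array,
                                   size_t* offset);  // defined in sketch above

void EnterBytecodeAdvance(const uint8_t* bytecode_array, size_t* offset) {
  if (AdvanceBytecodeOffsetOrReturn(bytecode_array, offset)) {
    // We should never take the if_return path here.
    std::fputs("Abort: kInvalidBytecodeAdvance\n", stderr);
    std::abort();
  }
  // ... store the new offset back into the frame and re-enter dispatch ...
}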
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 020b04b91d..9d7bc3fb80 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r2, r3, r5);
__ SmiUntag(r2);
}
- __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -288,7 +282,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -414,7 +408,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -558,9 +552,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ LoadRR(r5, r3);
__ LoadRR(r3, r6);
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
__ bind(&prepare_step_in_if_stepping);
@@ -830,9 +825,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ AddP(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r4);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -846,10 +842,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -859,11 +858,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ CmpP(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -871,7 +870,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -881,7 +880,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ CmpP(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
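// Rough C-style sketch of what the code emitted above computes (names are
// illustrative; the size table holds one int32 entry per bytecode, laid out
// once per operand scale):
//
//   uint8_t bc = bytecode_array[offset];
//   const int32_t* table = bytecode_size_table;  // 1x scale
//   if (bc == kWide) {
//     bc = bytecode_array[++offset];
//     table += kBytecodeCount;                   // 2x scale
//   } else if (bc == kExtraWide) {
//     bc = bytecode_array[++offset];
//     table += 2 * kBytecodeCount;               // 4x scale
//   }
//   if (IsReturnBytecode(bc)) goto if_return;    // RETURN_BYTECODE_LIST
//   offset += table[bc];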
@@ -911,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1020,11 +1029,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r5));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
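// In effect, the dispatch above is: handler = dispatch_table[array[offset]];
// handler(). Each table slot holds the code start of a bytecode handler,
// which is why the load targets kJavaScriptCallCodeStartRegister.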
@@ -1038,16 +1048,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ CmpP(r3, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1248,11 +1255,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1268,8 +1276,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
@@ -1277,6 +1287,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1294,7 +1308,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r4;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1308,7 +1322,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1337,7 +1355,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1520,9 +1538,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the CompileLazy builtin.
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -2048,7 +2067,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2442,8 +2461,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CmpP(r2, r4);
__ blt(&too_few);
__ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2459,7 +2476,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
@@ -2473,7 +2489,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: expected number of arguments
// r5: new target (passed through to callee)
// r6: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2497,7 +2512,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ lay(r2, MemOperand(r2, fp));
@@ -2506,7 +2520,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2519,7 +2532,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r3: function
// r4: expected number of arguments
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
@@ -2541,7 +2553,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r2 : expected number of arguments
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2554,7 +2569,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 5a09658867..d30cd02ab5 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -186,7 +186,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_address(isolate, new_target->instruction_start(),
+ rinfo->set_target_address(new_target->instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
@@ -202,7 +202,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(isolate, code->instruction_start(),
+ Assembler::FlushICache(code->instruction_start(),
code->instruction_size());
}
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index cd35abb362..898fe9c14c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -87,15 +87,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movp(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- __ jmp(kScratchRegister);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -115,7 +106,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Push(rdi);
__ CallRuntime(function_id, 1);
- __ movp(rbx, rax);
+ __ movp(rcx, rax);
// Restore target function and new target.
__ Pop(rdx);
@@ -123,8 +114,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(rax);
__ SmiToInteger32(rax, rax);
}
- __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
namespace {
@@ -230,7 +222,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
@@ -350,7 +342,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -660,6 +652,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -820,9 +813,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addp(optimized_code_entry,
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(optimized_code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ Move(rcx, optimized_code_entry);
+ __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rcx);
// Optimized code slot contains deoptimized code; evict it and re-enter the
// closure's code.
@@ -836,10 +830,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -848,11 +845,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate()));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -860,7 +857,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -869,8 +866,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
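// For illustration, RETURN_BYTECODE_LIST expands JUMP_IF_EQUAL once per
// return-like bytecode; e.g. for Bytecode::kReturn the pair emitted is:
//
//   __ cmpb(bytecode, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
//   __ j(equal, if_return, Label::kNear);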
@@ -896,7 +902,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1000,11 +1006,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ call(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1018,16 +1025,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1200,6 +1204,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ // TODO(jgruber,v8:6666): Update logic once builtin is off-heap-safe.
+ DCHECK(!Builtins::IsOffHeapSafe(Builtins::kInterpreterEntryTrampoline));
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
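// Presumably (see SetInterpreterEntryReturnPCOffset above) the return
// address is reconstructed as trampoline_start + recorded_offset, so that
// returning bytecode handlers land on the advance-or-return logic in the
// entry trampoline.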
@@ -1234,11 +1240,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ jmp(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1255,14 +1262,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1280,7 +1293,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = rbx;
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1293,7 +1306,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
@@ -1325,7 +1342,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -2015,6 +2032,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// rax : expected number of arguments
// rdx : new target (passed through to callee)
// rdi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
@@ -2030,6 +2048,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -2196,7 +2215,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------