author    Michaël Zasso <targos@protonmail.com>    2018-05-31 11:11:57 +0200
committer Myles Borins <mylesborins@google.com>    2018-06-01 09:58:27 +0200
commit    352a525eb984b8fa2d6f0f6fd68395e6a080bba4 (patch)
tree      a105ae93f8fd8f533cce19a429f1b6e95d6e11ca /deps/v8/src/builtins
parent    faf449ca0490f5371dc6cbbc94a87eb697b00fcc (diff)
deps: update V8 to 6.7.288.43
PR-URL: https://github.com/nodejs/node/pull/19989
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matheus Marchini <matheus@sthima.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/builtins')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 319
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 315
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc | 67
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 1567
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.h | 19
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc | 84
-rw-r--r--  deps/v8/src/builtins/builtins-arraybuffer.cc | 41
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 80
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.cc | 39
-rw-r--r--  deps/v8/src/builtins/builtins-boolean.cc | 37
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 137
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 63
-rw-r--r--  deps/v8/src/builtins/builtins-console.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 246
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h | 3
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-dataview.cc | 176
-rw-r--r--  deps/v8/src/builtins/builtins-date.cc | 155
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 76
-rw-r--r--  deps/v8/src/builtins/builtins-function-gen.cc | 28
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 9
-rw-r--r--  deps/v8/src/builtins/builtins-ic-gen.cc | 1
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 64
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter-gen.cc | 17
-rw-r--r--  deps/v8/src/builtins/builtins-interpreter.cc | 17
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 51
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 39
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 489
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 32
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 102
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 396
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 13
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 146
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-symbol.cc | 32
-rw-r--r--  deps/v8/src/builtins/builtins-typedarray-gen.cc | 147
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 12
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 820
-rw-r--r--  deps/v8/src/builtins/builtins.h | 30
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.cc | 3
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.h | 4
-rw-r--r--  deps/v8/src/builtins/growable-fixed-array-gen.cc | 4
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 316
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 312
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 313
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 322
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 310
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 25
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 313
52 files changed, 4308 insertions, 3518 deletions
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 329fee575f..de372a6453 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -88,19 +88,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the current native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -109,9 +96,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -124,6 +108,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -131,27 +116,30 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
+ // -- r1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(r2);
+ __ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ SmiTst(r7);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r7, r8, r9, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- __ mov(r3, r1);
+ // r2 is the AllocationSite - here undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ // If r3 (new target) is undefined, then this is the 'Call' case, so move
+ // r1 (the constructor) to r3.
+ __ cmp(r3, r2);
+ __ mov(r3, r1, LeaveCC, eq);
+
// Run the native code for the Array function called as a normal function.
// tail call a stub
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -290,7 +278,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
@@ -410,7 +398,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -453,13 +441,23 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ b(ne, &done);
+ __ ldr(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -539,6 +537,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r3, r0);
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -752,7 +751,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r1;
Register optimized_code_entry = scratch1;
@@ -762,9 +761,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -799,12 +798,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -936,6 +933,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
__ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r4);
__ b(ne, &maybe_load_debug_bytecode_array);
@@ -1055,11 +1053,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, r9);
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
__ SmiUntag(r9);
- __ tst(r9, Operand(DebugInfo::kHasBreakInfo));
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r4, Operand(debug_execution_mode));
+ __ ldrsb(r4, MemOperand(r4));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ cmp(r4, r9);
+ __ b(eq, &bytecode_array_loaded);
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ b(&bytecode_array_loaded);
}
@@ -1085,6 +1103,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1113,11 +1132,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1161,15 +1176,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(r2, r5);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1191,10 +1204,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ b(ne, &builtin_trampoline);
+
+ __ ldr(r2,
+ FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r2, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1267,42 +1299,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r1;
-
- // Get the feedback vector.
- Register feedback_vector = r2;
- __ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1310,6 +1309,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Move(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ b(ne, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmp(data_type, Operand(CODE_TYPE));
+ __ b(eq, &done);
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ b(ne, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmp(data_type, Operand(TUPLE2_TYPE));
+ __ b(ne, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ b(ne, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ ldr(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1332,13 +1401,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r5);
- // If SFI points to anything other than CompileLazy, install that.
- __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(r5, masm->CodeObject());
__ cmp(entry, r5);
__ b(eq, &gotta_call_runtime);
@@ -1408,25 +1479,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ ldr(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r5 != target && r5 != scratch0 && r5 != scratch1);
- CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
-
- __ str(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(r9, target_builtin); // Write barrier clobbers r9 below.
@@ -1998,7 +2053,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
__ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
@@ -2008,7 +2063,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
__ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ b(ne, &done_convert);
@@ -2243,17 +2298,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r1 : the constructor to call (checked to be a JSFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r1);
__ AssertFunction(r1);
// Calling convention for function specific ConstructStubs require
// r2 to contain either an AllocationSite or undefined.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ b(eq, &call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2263,6 +2328,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r1);
__ AssertBoundFunction(r1);
// Push the [[BoundArguments]] onto the stack.
@@ -2291,16 +2357,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r1, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(r2, Operand(Map::IsConstructorBit::kMask));
__ b(eq, &non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2503,28 +2570,34 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r3; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
+ constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r8.
- __ add(r8, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ mov(r8, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
__ ldm(ia_w, sp, gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(r8);
}
#undef __
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f06969ec6b..aae189f19b 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -19,19 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
__ Mov(x5, ExternalReference(address, masm->isolate()));
@@ -104,9 +91,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
Label generic_array_code;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, x1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +102,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,15 +110,13 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
+ // -- x1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ArrayConstructor");
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, x1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -143,9 +126,14 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- // Run the native code for the Array function called as a normal function.
+ // x2 is the AllocationSite - here undefined.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ Mov(x3, x1);
+ // If x3 (new target) is undefined, then this is the 'Call' case, so move
+ // x1 (the constructor) to x3.
+ __ Cmp(x3, x2);
+ __ CmovX(x3, x1, eq);
+
+ // Run the native code for the Array function called as a normal function.
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -325,7 +313,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w4,
SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
@@ -451,7 +439,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Bind(&other_result);
__ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -495,9 +483,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -596,8 +581,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label check_has_bytecode_array;
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
+ __ B(ne, &check_has_bytecode_array);
+ __ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
+ __ Bind(&check_has_bytecode_array);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -845,7 +835,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = x1;
Register optimized_code_entry = scratch1;
@@ -855,9 +845,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -892,12 +882,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ Ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -1025,10 +1013,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
+ has_bytecode_array;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
+ INTERPRETER_DATA_TYPE);
+ __ B(ne, &has_bytecode_array);
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ InterpreterData::kBytecodeArrayOffset));
+ __ Bind(&has_bytecode_array);
__ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
__ Bind(&bytecode_array_loaded);
@@ -1149,12 +1145,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ Bind(&maybe_load_debug_bytecode_array);
- __ Ldrsw(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
- __ TestAndBranchIfAllClear(x10, DebugInfo::kHasBreakInfo,
- &bytecode_array_loaded);
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
- __ B(&bytecode_array_loaded);
+ __ Ldr(x10, FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ Mov(kInterpreterBytecodeArrayRegister, x10);
+ __ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
+ __ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
+
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ Mov(x11, Operand(debug_execution_mode));
+ __ Ldrsb(x11, MemOperand(x11));
+ __ CompareAndBranch(x10, x11, eq, &bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector);
+ __ PushArgument(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(feedback_vector, closure);
+ __ jmp(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1236,6 +1246,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1255,11 +1266,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
receiver_mode, mode);
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1290,15 +1297,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
ConvertReceiverMode::kNullOrUndefined, mode);
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(x1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
- __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x4);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1313,10 +1318,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ B(ne, &builtin_trampoline);
+
+ __ Ldr(x1,
+ FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
+ __ B(&trampoline_loaded);
+
+ __ Bind(&builtin_trampoline);
__ LoadObject(x1, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ Bind(&trampoline_loaded);
__ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1387,42 +1411,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argument count (preserved for callee)
- // -- x3 : new target (preserved for callee)
- // -- x1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = x1;
-
- // Get the feedback vector.
- Register feedback_vector = x2;
- __ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Mov(x2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1430,6 +1421,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Mov(scratch1, ExternalReference::builtins_address(masm->isolate()));
+ __ Mov(sfi_data, Operand::UntagSmiAndScale(sfi_data, kPointerSizeLog2));
+ __ Ldr(sfi_data, MemOperand(scratch1, sfi_data));
+ __ B(&done);
+
+ // Get map for subsequent checks.
+ __ Bind(&check_is_bytecode_array);
+ __ Ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ Ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ B(ne, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ B(&done);
+
+ // IsCode: Run code
+ __ Bind(&check_is_code);
+ __ Cmp(data_type, Operand(CODE_TYPE));
+ __ B(eq, &done);
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ Bind(&check_is_fixed_array);
+ __ Cmp(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ B(ne, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ B(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ Bind(&check_is_pre_parsed_scope_data);
+ __ Cmp(data_type, Operand(TUPLE2_TYPE));
+ __ B(ne, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ B(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ Bind(&check_is_function_template_info);
+ __ Cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ B(ne, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ B(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ Bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ Ldr(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ Bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1452,13 +1513,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, x5);
- // If SFI points to anything other than CompileLazy, install that.
- __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(x5, masm->CodeObject());
__ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
@@ -1527,25 +1590,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ Ldr(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(!x5.is(target) && !x5.is(scratch0) && !x5.is(scratch1));
- CHECK(!x9.is(target) && !x9.is(scratch0) && !x9.is(scratch1));
-
- __ Str(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, x9, x5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ Str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
@@ -2360,7 +2407,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
@@ -2647,18 +2694,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- x1 : the constructor to call (checked to be a JSFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(x1);
__ AssertFunction(x1);
// Calling convention for function specific ConstructStubs require
// x2 to contain either an AllocationSite or undefined.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
- __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x4);
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2668,6 +2724,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(x1);
__ AssertBoundFunction(x1);
// Push the [[BoundArguments]] onto the stack.
@@ -2701,16 +2758,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(x1, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
&non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
@@ -2977,28 +3035,35 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = x7; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
+ constexpr RegList gp_regs = Register::ListOf<x0, x1, x2, x3, x4, x5>();
constexpr RegList fp_regs =
Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
+ __ Push(x5, x6); // note: pushed twice because alignment required
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ PushArgument(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in x8.
- __ Add(x8, x0, Code::kHeaderSize - kHeapObjectTag);
+ // The entrypoint address is the first return value.
+ __ mov(x8, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
+ __ Pop(x6, x5); // note: pushed twice because alignment required
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(x8);
}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index bb66b082f3..ce19bf8662 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins.h"
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/api-natives.h"
#include "src/builtins/builtins-utils.h"
#include "src/counters.h"
@@ -21,21 +21,17 @@ namespace {
// Returns the holder JSObject if the function can legally be called with this
// receiver. Returns nullptr if the call is illegal.
// TODO(dcarney): CallOptimization duplicates this logic, merge.
-JSReceiver* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
- JSReceiver* receiver) {
+JSObject* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
+ JSObject* receiver) {
Object* recv_type = info->signature();
// No signature, return holder.
if (!recv_type->IsFunctionTemplateInfo()) return receiver;
- // A Proxy cannot have been created from the signature template.
- if (!receiver->IsJSObject()) return nullptr;
-
- JSObject* js_obj_receiver = JSObject::cast(receiver);
FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
// Check the receiver. Fast path for receivers with no hidden prototypes.
- if (signature->IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver->map()->has_hidden_prototype()) return nullptr;
- for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
+ if (signature->IsTemplateFor(receiver)) return receiver;
+ if (!receiver->map()->has_hidden_prototype()) return nullptr;
+ for (PrototypeIterator iter(isolate, receiver, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
!iter.IsAtEnd(); iter.Advance()) {
JSObject* current = iter.GetCurrent<JSObject>();
@@ -45,12 +41,12 @@ JSReceiver* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
}
template <bool is_construct>
-MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
Isolate* isolate, Handle<HeapObject> function,
Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
Handle<Object> receiver, BuiltinArguments args) {
- Handle<JSReceiver> js_receiver;
- JSReceiver* raw_holder;
+ Handle<JSObject> js_receiver;
+ JSObject* raw_holder;
if (is_construct) {
DCHECK(args.receiver()->IsTheHole(isolate));
if (fun_data->instance_template()->IsUndefined(isolate)) {
@@ -72,18 +68,21 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
raw_holder = *js_receiver;
} else {
DCHECK(receiver->IsJSReceiver());
- js_receiver = Handle<JSReceiver>::cast(receiver);
+
+ if (!receiver->IsJSObject()) {
+ // This function cannot be called with the given receiver. Abort!
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+ }
+
+ js_receiver = Handle<JSObject>::cast(receiver);
if (!fun_data->accept_any_receiver() &&
- js_receiver->IsAccessCheckNeeded()) {
- // Proxies never need access checks.
- DCHECK(js_receiver->IsJSObject());
- Handle<JSObject> js_obj_receiver = Handle<JSObject>::cast(js_receiver);
- if (!isolate->MayAccess(handle(isolate->context()), js_obj_receiver)) {
- isolate->ReportFailedAccessCheck(js_obj_receiver);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
+ js_receiver->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
+ isolate->ReportFailedAccessCheck(js_receiver);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
@@ -180,6 +179,25 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
}
}
+ if (function->IsFunctionTemplateInfo()) {
+ Handle<FunctionTemplateInfo> info =
+ Handle<FunctionTemplateInfo>::cast(function);
+ // If we need to break at function entry, go the long way. Instantiate the
+ // function, use the DebugBreakTrampoline, and call it through JS.
+ if (info->BreakAtEntry()) {
+ DCHECK(!is_construct);
+ DCHECK(new_target->IsUndefined(isolate));
+ Handle<JSFunction> function;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
+ ApiNatives::InstantiateFunction(
+ info, MaybeHandle<v8::internal::Name>()),
+ Object);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+ function->set_code(*trampoline);
+ return Execution::Call(isolate, function, receiver, argc, args);
+ }
+ }
+
Handle<FunctionTemplateInfo> fun_data =
function->IsFunctionTemplateInfo()
? Handle<FunctionTemplateInfo>::cast(function)
@@ -224,7 +242,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
+V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
Handle<Object> receiver = args.receiver();
@@ -259,7 +277,6 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
-
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
obj, new_target, &args[0] - 1,
args.length() - 1);
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 52a6222882..cca395b2ce 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -8,8 +8,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
#include "src/frame-constants.h"
+#include "src/heap/factory-inl.h"
#include "src/builtins/builtins-array-gen.h"
@@ -151,7 +151,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast);
{
GotoIf(SmiNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
- kind = EnsureArrayPushable(a(), &runtime);
+ kind = EnsureArrayPushable(LoadMap(a()), &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
@@ -214,8 +214,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
CSA_ASSERT(this,
SmiLessThanOrEqual(
len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
- fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(o_)),
- LoadInstanceType(LoadElements(a)));
+ fast_typed_array_target_ =
+ Word32Equal(LoadInstanceType(LoadElements(original_array)),
+ LoadInstanceType(LoadElements(a)));
a_.Bind(a);
}
@@ -413,15 +414,15 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
- VARIABLE(merged_length, MachineRepresentation::kTagged);
+ TVARIABLE(Number, merged_length);
Label has_length(this, &merged_length), not_js_array(this);
GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
- merged_length.Bind(LoadJSArrayLength(o()));
+ merged_length = LoadJSArrayLength(CAST(o()));
Goto(&has_length);
BIND(&not_js_array);
Node* len_property =
GetProperty(context(), o(), isolate()->factory()->length_string());
- merged_length.Bind(ToLength_Inline(context(), len_property));
+ merged_length = ToLength_Inline(context(), len_property);
Goto(&has_length);
BIND(&has_length);
len_ = merged_length.value();
@@ -466,7 +467,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to) {
+ Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
+ TNode<Number> len, Node* to) {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
@@ -489,14 +491,17 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
throw_detached(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
- GotoIfNot(HasInstanceType(receiver_, JS_TYPED_ARRAY_TYPE),
+ GotoIfNot(HasInstanceType(CAST(receiver_), JS_TYPED_ARRAY_TYPE),
&throw_not_typed_array);
- o_ = receiver_;
- Node* array_buffer = LoadObjectField(o_, JSTypedArray::kBufferOffset);
+ TNode<JSTypedArray> typed_array = CAST(receiver_);
+ o_ = typed_array;
+
+ Node* array_buffer =
+ LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);
- len_ = LoadObjectField(o_, JSTypedArray::kLengthOffset);
+ len_ = LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -540,7 +545,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
} else {
k_.Bind(NumberDec(len()));
}
- Node* instance_type = LoadInstanceType(LoadElements(o_));
+ Node* instance_type = LoadInstanceType(LoadElements(typed_array));
Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
@@ -552,7 +557,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
- VisitAllTypedArrayElements(array_buffer, processor, &done, direction);
+ VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
+ typed_array);
Goto(&done);
// No exception, return success
BIND(&done);
@@ -638,12 +644,12 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
Node* array_buffer, const CallResultProcessor& processor, Label* detached,
- ForEachDirection direction) {
+ ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
GotoIf(IsDetachedBuffer(array_buffer), detached);
- Node* elements = LoadElements(o_);
+ Node* elements = LoadElements(typed_array);
Node* base_ptr =
LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
Node* external_ptr =
@@ -671,13 +677,13 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
ElementsKind kind, const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode, ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ MissingPropertyMode missing_property_mode, TNode<Smi> length) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
VariableList list({&original_map, &a_, &k_, &to_}, zone());
Node* start = IntPtrOrSmiConstant(0, mode);
- Node* end = TaggedToParameter(len(), mode);
+ Node* end = TaggedToParameter(length, mode);
IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost;
@@ -695,13 +701,14 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Node* o_map = LoadMap(o());
GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);
+ TNode<JSArray> o_array = CAST(o());
// Check if o's length has changed during the callback and if the
// index is now out of range of the new length.
- GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o())),
+ GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o_array)),
array_changed);
// Re-load the elements array. It may have been resized.
- Node* elements = LoadElements(o());
+ Node* elements = LoadElements(o_array);
// Fast case: load the element directly from the elements FixedArray
// and call the callback if the element is not the hole.
@@ -757,6 +764,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
&switch_on_elements_kind, slow);
BIND(&switch_on_elements_kind);
+ TNode<Smi> smi_len = CAST(len());
// Select by ElementsKind
Node* o_map = LoadMap(o());
Node* bit_field2 = LoadMapBitField2(o_map);
@@ -768,7 +776,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast_elements);
{
VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode);
+ direction, missing_property_mode, smi_len);
action(this);
@@ -783,7 +791,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast_double_elements);
{
VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode);
+ direction, missing_property_mode, smi_len);
action(this);
@@ -807,7 +815,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
&runtime);
- Node* species_protector = SpeciesProtectorConstant();
+ Node* species_protector = ArraySpeciesProtectorConstant();
Node* value =
LoadObjectField(species_protector, PropertyCell::kValueOffset);
TNode<Smi> const protector_invalid =
@@ -841,8 +849,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(
- SloppyTNode<Smi> len) {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
@@ -853,7 +860,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
&runtime);
- Node* species_protector = SpeciesProtectorConstant();
+ Node* species_protector = ArraySpeciesProtectorConstant();
Node* value =
LoadObjectField(species_protector, PropertyCell::kValueOffset);
Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
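The protector checked here only lets the stub skip ArraySpeciesCreate while Array[Symbol.species] and the array constructors are untouched. A rough TypeScript sketch (standard ES semantics, not V8-specific API) of the observable behaviour it guards:

```ts
// map() creates its result through ArraySpeciesCreate, so subclassing
// changes the type of the result array; plain arrays keep the fast path.
class Tagged extends Array<number> {}

const t = Tagged.of(1, 2, 3);
console.log(t.map(x => x * 2) instanceof Tagged);                // true: species is Tagged
console.log(Array.of(1, 2, 3).map(x => x * 2) instanceof Array); // plain array result
```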
@@ -1019,7 +1026,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
{
array_receiver = CAST(receiver);
arg_index = IntPtrConstant(0);
- kind = EnsureArrayPushable(array_receiver, &runtime);
+ kind = EnsureArrayPushable(LoadMap(array_receiver), &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
@@ -1146,7 +1153,7 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
GotoIf(IsNoElementsProtectorCellInvalid(), slow);
- GotoIf(IsSpeciesProtectorCellInvalid(), slow);
+ GotoIf(IsArraySpeciesProtectorCellInvalid(), slow);
// Bailout if receiver has slow elements.
Node* elements_kind = LoadMapElementsKind(map);
@@ -1647,9 +1654,9 @@ TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -1670,7 +1677,7 @@ TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -1685,7 +1692,7 @@ TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -1702,7 +1709,7 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* found_value = Parameter(Descriptor::kFoundValue);
Node* is_found = Parameter(Descriptor::kIsFound);
@@ -1748,9 +1755,9 @@ TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -1769,7 +1776,7 @@ TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
@@ -1782,7 +1789,7 @@ TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
@@ -1796,7 +1803,7 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* found_value = Parameter(Descriptor::kFoundValue);
Node* is_found = Parameter(Descriptor::kIsFound);
@@ -1938,13 +1945,10 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
void GenerateSetLength(TNode<Context> context, TNode<Object> array,
TNode<Number> length) {
Label fast(this), runtime(this), done(this);
- // TODO(delphick): We should be able to skip the fast set altogether, if the
- // length already equals the expected length, which it always is now on the
- // fast path.
- // Only set the length in this stub if
- // 1) the array has fast elements,
- // 2) the length is writable,
- // 3) the new length is equal to the old length.
+ // There's no need to set the length if
+ // 1) the array is a fast JS array and
+ // 2) the new length is equal to the old length,
+ // as the set is not observable. Otherwise fall back to the runtime.
// 1) Check that the array has fast elements.
// TODO(delphick): Consider changing this since it does an unnecessary
@@ -1961,20 +1965,11 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
- // 2) Ensure that the length is writable.
- // TODO(delphick): This check may be redundant due to the
- // BranchIfFastJSArray above.
- EnsureArrayLengthWritable(LoadMap(fast_array), &runtime);
-
- // 3) If the created array's length does not match the required length,
- // then use the runtime to set the property as that will insert holes
- // into excess elements or shrink the backing store as appropriate.
- GotoIf(SmiNotEqual(length_smi, old_length), &runtime);
-
- StoreObjectFieldNoWriteBarrier(fast_array, JSArray::kLengthOffset,
- length_smi);
-
- Goto(&done);
+ // 2) If the created array's length matches the required length, then
+ // there's nothing else to do. Otherwise use the runtime to set the
+ // property as that will insert holes into excess elements or shrink
+ // the backing store as appropriate.
+ Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
}
BIND(&runtime);
@@ -2128,14 +2123,11 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
}
}
- // Since there's no iterator, items cannot be a Fast JS Array.
BIND(&not_iterable);
{
- CSA_ASSERT(this, Word32BinaryNot(IsFastJSArray(array_like, context)));
-
// Treat array_like as an array and try to get its length.
- length = CAST(ToLength_Inline(
- context, GetProperty(context, array_like, factory()->length_string())));
+ length = ToLength_Inline(
+ context, GetProperty(context, array_like, factory()->length_string()));
// Construct an array using the receiver as constructor with the same length
// as the input array.
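In plain JS terms, the not_iterable path covers array-likes that expose a length but no Symbol.iterator; a short sketch of the expected results:

```ts
// Array-likes without an iterator are read by index up to their length.
console.log(Array.from({ length: 2, 0: 'a', 1: 'b' })); // ['a', 'b']
console.log(Array.from({ length: 3 }));                 // [undefined, undefined, undefined]
```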
@@ -2257,9 +2249,9 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2277,7 +2269,7 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -2290,7 +2282,7 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -2345,7 +2337,7 @@ TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. some() needs
@@ -2374,7 +2366,7 @@ TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
callbackfn, this_arg, FalseConstant(), receiver, initial_k,
@@ -2387,9 +2379,9 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2448,7 +2440,7 @@ TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. every() needs
@@ -2477,7 +2469,7 @@ TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
callbackfn, this_arg, TrueConstant(), receiver, initial_k,
@@ -2490,9 +2482,9 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2551,9 +2543,9 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2570,16 +2562,14 @@ TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
// Simulate starting the loop at 0, but ensuring that the accumulator is
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- TheHoleConstant(), receiver, SmiConstant(0), len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), TheHoleConstant(),
+ receiver, SmiConstant(0), len, UndefinedConstant()));
}
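The hole-as-accumulator trick mirrors reduce() being called without an initial value; a brief sketch of the behaviour the continuation has to reproduce:

```ts
// With no initial value the first element seeds the accumulator,
// and an empty array throws a TypeError.
const empty: number[] = [];
console.log([1, 2, 3].reduce((acc, x) => acc + x)); // 6
console.log(empty.reduce((acc, x) => acc + x, 10)); // 10 (explicit seed)
try {
  empty.reduce((acc, x) => acc + x);                // no elements, no seed
} catch (e) {
  console.log((e as Error).name);                   // "TypeError"
}
```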
TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2588,12 +2578,11 @@ TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- accumulator, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), accumulator, receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2601,13 +2590,12 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- result, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), result, receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
@@ -2657,9 +2645,9 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2677,16 +2665,15 @@ TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Smi> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
// Simulate starting the loop at len - 1, but ensuring that the accumulator is
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- TheHoleConstant(), receiver, SmiConstant(0), len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiSub(len, SmiConstant(1)),
+ len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2695,12 +2682,11 @@ TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- accumulator, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(), accumulator,
+ receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2708,13 +2694,12 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- result, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(), result,
+ receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
@@ -2767,9 +2752,9 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2788,7 +2773,7 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
@@ -2803,7 +2788,7 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* value_k = Parameter(Descriptor::kValueK);
Node* result = Parameter(Descriptor::kResult);
@@ -2864,9 +2849,9 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2885,7 +2870,7 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
callbackfn, this_arg, array, receiver, initial_k, len,
@@ -2899,7 +2884,7 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. map() needs
@@ -2965,7 +2950,7 @@ TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
Label call_runtime(this), return_true(this), return_false(this);
GotoIf(TaggedIsSmi(object), &return_false);
- TNode<Word32T> instance_type = LoadInstanceType(CAST(object));
+ TNode<Int32T> instance_type = LoadInstanceType(CAST(object));
GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &return_true);
@@ -2991,6 +2976,15 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
enum SearchVariant { kIncludes, kIndexOf };
void Generate(SearchVariant variant);
+ void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
+ void GeneratePackedDoubles(SearchVariant variant, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
+ void GenerateHoleyDoubles(SearchVariant variant, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
};
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
@@ -3008,8 +3002,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Node* intptr_zero = IntPtrConstant(0);
- Label init_index(this), return_found(this), return_not_found(this),
- call_runtime(this);
+ Label init_index(this), return_not_found(this), call_runtime(this);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing the prototype chain, or if access checks are required.
@@ -3021,7 +3014,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
// JSArray length is always a positive Smi for fast arrays.
CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
- Node* array_length = SmiUntag(LoadFastJSArrayLength(array));
+ Node* array_length = LoadFastJSArrayLength(array);
+ Node* array_length_untagged = SmiUntag(array_length);
{
// Initialize fromIndex.
@@ -3049,7 +3043,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
// The fromIndex is negative: add it to the array's length.
- index_var.Bind(IntPtrAdd(array_length, index_var.value()));
+ index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value()));
// Clamp negative results at zero.
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
index_var.Bind(intptr_zero);
@@ -3059,7 +3053,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
}
// Fail early if startIndex >= array.length.
- GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length),
+ GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length_untagged),
&return_not_found);
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
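The fromIndex normalisation above (a negative start is added to the length, then clamped at zero; a start at or beyond the length finds nothing) corresponds to this observable behaviour:

```ts
const a = [1, 2, 3];
console.log(a.includes(3, -1));  // true  (start = 3 + (-1) = 2)
console.log(a.includes(1, -10)); // true  (3 + (-10) < 0, clamped to 0)
console.log(a.indexOf(2, -10));  // 1     (same clamping for indexOf)
console.log(a.includes(1, 5));   // false (start >= length)
```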
@@ -3080,179 +3074,139 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&if_smiorobjects);
{
- VARIABLE(search_num, MachineRepresentation::kFloat64);
- Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
- string_loop(this), bigint_loop(this, &index_var),
- undef_loop(this, &index_var), not_smi(this), not_heap_num(this);
-
- GotoIfNot(TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&heap_num_loop);
-
- BIND(&not_smi);
- if (variant == kIncludes) {
- GotoIf(IsUndefined(search_element), &undef_loop);
- }
- Node* map = LoadMap(CAST(search_element));
- GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
- Goto(&heap_num_loop);
-
- BIND(&not_heap_num);
- Node* search_type = LoadMapInstanceType(map);
- GotoIf(IsStringInstanceType(search_type), &string_loop);
- GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
- Goto(&ident_loop);
-
- BIND(&ident_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(WordEqual(element_k, search_element), &return_found);
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesSmiOrObject)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfSmiOrObject);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- Increment(&index_var);
- Goto(&ident_loop);
- }
+ BIND(&if_packed_doubles);
+ {
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesPackedDoubles)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfPackedDoubles);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- if (variant == kIncludes) {
- BIND(&undef_loop);
+ BIND(&if_holey_doubles);
+ {
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesHoleyDoubles)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfHoleyDoubles);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(IsUndefined(element_k), &return_found);
- GotoIf(IsTheHole(element_k), &return_found);
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ args.PopAndReturn(FalseConstant());
+ } else {
+ args.PopAndReturn(NumberConstant(-1));
+ }
- Increment(&index_var);
- Goto(&undef_loop);
- }
+ BIND(&call_runtime);
+ {
+ Node* start_from =
+ args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ Runtime::FunctionId function = variant == kIncludes
+ ? Runtime::kArrayIncludes_Slow
+ : Runtime::kArrayIndexOf;
+ args.PopAndReturn(
+ CallRuntime(function, context, array, search_element, start_from));
+ }
+}
- BIND(&heap_num_loop);
- {
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
- Label* nan_handling =
- variant == kIncludes ? &nan_loop : &return_not_found;
- BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
+ SearchVariant variant, Node* context, Node* elements, Node* search_element,
+ Node* array_length, Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ Node* array_length_untagged = SmiUntag(array_length);
- BIND(&not_nan_loop);
- {
- Label continue_loop(this), not_smi(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIfNot(TaggedIsSmi(element_k), &not_smi);
- Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
- &return_found, &continue_loop);
-
- BIND(&not_smi);
- GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
- &return_found, &continue_loop);
-
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&not_nan_loop);
- }
+ Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
+ string_loop(this), bigint_loop(this, &index_var),
+ undef_loop(this, &index_var), not_smi(this), not_heap_num(this),
+ return_found(this), return_not_found(this);
- // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
- if (variant == kIncludes) {
- BIND(&nan_loop);
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
- &continue_loop);
-
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&nan_loop);
- }
- }
+ GotoIfNot(TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&heap_num_loop);
- BIND(&string_loop);
- {
- TNode<String> search_element_string = CAST(search_element);
- Label continue_loop(this), next_iteration(this, &index_var),
- slow_compare(this), runtime(this, Label::kDeferred);
- TNode<IntPtrT> search_length =
- LoadStringLengthAsWord(search_element_string);
- Goto(&next_iteration);
- BIND(&next_iteration);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIf(WordEqual(search_element_string, element_k), &return_found);
- Node* element_k_type = LoadInstanceType(element_k);
- GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
- Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
- &slow_compare, &continue_loop);
-
- BIND(&slow_compare);
- StringBuiltinsAssembler string_asm(state());
- string_asm.StringEqual_Core(context, search_element_string, search_type,
- element_k, element_k_type, search_length,
- &return_found, &continue_loop, &runtime);
- BIND(&runtime);
- TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
- search_element_string, element_k);
- Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+ BIND(&not_smi);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &undef_loop);
+ }
+ Node* map = LoadMap(search_element);
+ GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(LoadHeapNumberValue(search_element));
+ Goto(&heap_num_loop);
+
+ BIND(&not_heap_num);
+ Node* search_type = LoadMapInstanceType(map);
+ GotoIf(IsStringInstanceType(search_type), &string_loop);
+ GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
+ Goto(&ident_loop);
+
+ BIND(&ident_loop);
+ {
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(WordEqual(element_k, search_element), &return_found);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&next_iteration);
- }
+ Increment(&index_var);
+ Goto(&ident_loop);
+ }
- BIND(&bigint_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ if (variant == kIncludes) {
+ BIND(&undef_loop);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- Label continue_loop(this);
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsBigInt(element_k), &continue_loop);
- TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
- search_element, element_k);
- Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(IsUndefined(element_k), &return_found);
+ GotoIf(IsTheHole(element_k), &return_found);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&bigint_loop);
- }
+ Increment(&index_var);
+ Goto(&undef_loop);
}
- BIND(&if_packed_doubles);
+ BIND(&heap_num_loop);
{
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
- hole_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
-
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
-
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
BIND(&not_nan_loop);
{
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ Label continue_loop(this), not_smi(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
- Branch(Float64Equal(element_k, search_num.value()), &return_found,
- &continue_loop);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+ Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
+ &return_found, &continue_loop);
+
+ BIND(&not_smi);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
+ &return_found, &continue_loop);
+
BIND(&continue_loop);
Increment(&index_var);
Goto(&not_nan_loop);
@@ -3262,112 +3216,249 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
if (variant == kIncludes) {
BIND(&nan_loop);
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
- BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
+ &continue_loop);
+
BIND(&continue_loop);
Increment(&index_var);
Goto(&nan_loop);
}
}
- BIND(&if_holey_doubles);
+ BIND(&string_loop);
{
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
- hole_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- if (variant == kIncludes) {
- GotoIf(IsUndefined(search_element), &hole_loop);
- }
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ TNode<String> search_element_string = CAST(search_element);
+ Label continue_loop(this), next_iteration(this, &index_var),
+ slow_compare(this), runtime(this, Label::kDeferred);
+ TNode<IntPtrT> search_length =
+ LoadStringLengthAsWord(search_element_string);
+ Goto(&next_iteration);
+ BIND(&next_iteration);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIf(WordEqual(search_element_string, element_k), &return_found);
+ Node* element_k_type = LoadInstanceType(element_k);
+ GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
+ Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
+ &slow_compare, &continue_loop);
+
+ BIND(&slow_compare);
+ StringBuiltinsAssembler string_asm(state());
+ string_asm.StringEqual_Core(context, search_element_string, search_type,
+ element_k, element_k_type, search_length,
+ &return_found, &continue_loop, &runtime);
+ BIND(&runtime);
+ TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
+ search_element_string, element_k);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&next_iteration);
+ }
- Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
- BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+ BIND(&bigint_loop);
+ {
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Label continue_loop(this);
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsBigInt(element_k), &continue_loop);
+ TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
+ search_element, element_k);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&bigint_loop);
+ }
+ BIND(&return_found);
+ if (variant == kIncludes) {
+ Return(TrueConstant());
+ } else {
+ Return(SmiTag(index_var.value()));
+ }
- BIND(&not_nan_loop);
- {
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ Return(FalseConstant());
+ } else {
+ Return(NumberConstant(-1));
+ }
+}
- // No need for hole checking here; the following Float64Equal will
- // return 'not equal' for holes anyway.
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
+void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
+ Node* elements,
+ Node* search_element,
+ Node* array_length,
+ Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ Node* array_length_untagged = SmiUntag(array_length);
- Branch(Float64Equal(element_k, search_num.value()), &return_found,
- &continue_loop);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&not_nan_loop);
- }
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this), return_found(this),
+ return_not_found(this);
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ search_num.Bind(Float64Constant(0));
- // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
- if (variant == kIncludes) {
- BIND(&nan_loop);
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
- // Load double value or continue if it's the hole NaN.
- Node* element_k = LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &continue_loop);
+ BIND(&search_notnan);
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
- BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&nan_loop);
- }
+ search_num.Bind(LoadHeapNumberValue(search_element));
- // Array.p.includes treats the hole as undefined.
- if (variant == kIncludes) {
- BIND(&hole_loop);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
- // Check if the element is a double hole, but don't load it.
- LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::None(), 0, INTPTR_PARAMETERS,
- &return_found);
+ BIND(&not_nan_loop);
+ {
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
+ &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&not_nan_loop);
+ }
- Increment(&index_var);
- Goto(&hole_loop);
- }
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&nan_loop);
}
BIND(&return_found);
if (variant == kIncludes) {
- args.PopAndReturn(TrueConstant());
+ Return(TrueConstant());
} else {
- args.PopAndReturn(SmiTag(index_var.value()));
+ Return(SmiTag(index_var.value()));
}
BIND(&return_not_found);
if (variant == kIncludes) {
- args.PopAndReturn(FalseConstant());
+ Return(FalseConstant());
} else {
- args.PopAndReturn(NumberConstant(-1));
+ Return(NumberConstant(-1));
}
+}
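The nan_loop exists only in the kIncludes variant because includes() compares with SameValueZero while indexOf() uses strict equality; roughly:

```ts
const doubles = [0.5, NaN, 1.5];
console.log(doubles.includes(NaN)); // true  (SameValueZero: NaN matches NaN)
console.log(doubles.indexOf(NaN));  // -1    (strict equality never matches NaN)
console.log(doubles.indexOf(1.5));  // 2
```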
- BIND(&call_runtime);
+void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
+ Node* elements,
+ Node* search_element,
+ Node* array_length,
+ Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ Node* array_length_untagged = SmiUntag(array_length);
+
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this), return_found(this),
+ return_not_found(this);
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ search_num.Bind(Float64Constant(0));
+
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
+
+ BIND(&search_notnan);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &hole_loop);
+ }
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+
+ search_num.Bind(LoadHeapNumberValue(search_element));
+
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+
+ BIND(&not_nan_loop);
{
- Node* start_from = args.GetOptionalArgumentValue(kFromIndexArg);
- Runtime::FunctionId function = variant == kIncludes
- ? Runtime::kArrayIncludes_Slow
- : Runtime::kArrayIndexOf;
- args.PopAndReturn(
- CallRuntime(function, context, array, search_element, start_from));
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // No need for hole checking here; the following Float64Equal will
+ // return 'not equal' for holes anyway.
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
+ &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&not_nan_loop);
+ }
+
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // Load double value or continue if it's the hole NaN.
+ Node* element_k = LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, &continue_loop);
+
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&nan_loop);
+ }
+
+ // Array.p.includes treats the hole as undefined.
+ if (variant == kIncludes) {
+ BIND(&hole_loop);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // Check if the element is a double hole, but don't load it.
+ LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::None(), 0, INTPTR_PARAMETERS,
+ &return_found);
+
+ Increment(&index_var);
+ Goto(&hole_loop);
+ }
+
+ BIND(&return_found);
+ if (variant == kIncludes) {
+ Return(TrueConstant());
+ } else {
+ Return(SmiTag(index_var.value()));
+ }
+
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ Return(FalseConstant());
+ } else {
+ Return(NumberConstant(-1));
}
}
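Likewise the hole_loop is only needed for kIncludes, since includes() treats holes as undefined while indexOf() skips them; a quick sketch:

```ts
const holey: (number | undefined)[] = [0.5, , 1.5]; // index 1 is a hole
console.log(holey.includes(undefined)); // true  (hole reads as undefined)
console.log(holey.indexOf(undefined));  // -1    (holes are skipped)
```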
@@ -3375,69 +3466,95 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
Generate(kIncludes);
}
-TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
+TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
-class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
- public:
- explicit ArrayPrototypeIterationAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ GenerateSmiOrObject(kIncludes, context, elements, search_element,
+ array_length, from_index);
+}
+
+TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
+
+ GeneratePackedDoubles(kIncludes, elements, search_element, array_length,
+ from_index);
+}
- protected:
- void Generate_ArrayPrototypeIterationMethod(TNode<Context> context,
- TNode<Object> receiver,
- IterationKind iteration_kind) {
- VARIABLE(var_array, MachineRepresentation::kTagged);
- VARIABLE(var_map, MachineRepresentation::kTagged);
- VARIABLE(var_type, MachineRepresentation::kWord32);
+TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
- Label if_isnotobject(this, Label::kDeferred);
- Label create_array_iterator(this);
+ GenerateHoleyDoubles(kIncludes, elements, search_element, array_length,
+ from_index);
+}
- GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
+TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
- TNode<HeapObject> object_receiver = CAST(receiver);
- var_array.Bind(object_receiver);
- var_map.Bind(LoadMap(object_receiver));
- var_type.Bind(LoadMapInstanceType(var_map.value()));
- Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
- &if_isnotobject);
+TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
- BIND(&if_isnotobject);
- {
- TNode<JSReceiver> result = ToObject(context, receiver);
- var_array.Bind(result);
- var_map.Bind(LoadMap(result));
- var_type.Bind(LoadMapInstanceType(var_map.value()));
- Goto(&create_array_iterator);
- }
+ GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
+ from_index);
+}
- BIND(&create_array_iterator);
- Return(CreateArrayIterator(var_array.value(), var_map.value(),
- var_type.value(), context, iteration_kind));
- }
-};
+TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
+
+ GeneratePackedDoubles(kIndexOf, elements, search_element, array_length,
+ from_index);
+}
+
+TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
-TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
+ GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length,
+ from_index);
+}
+
+// ES #sec-array.prototype.values
+TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kValues);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kValues));
}
-TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
+// ES #sec-array.prototype.entries
+TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kEntries);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kEntries));
}
-TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
+// ES #sec-array.prototype.keys
+TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kKeys);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kKeys));
}
+// ES #sec-%arrayiteratorprototype%.next
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
const char* method_name = "Array Iterator.prototype.next";
@@ -3454,16 +3571,13 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Label throw_bad_receiver(this, Label::kDeferred);
Label set_done(this);
- Label allocate_key_result(this);
Label allocate_entry_if_needed(this);
Label allocate_iterator_result(this);
- Label generic_values(this);
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception
GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- TNode<Int32T> instance_type = LoadInstanceType(iterator);
- GotoIf(IsArrayIteratorInstanceType(instance_type), &throw_bad_receiver);
+ GotoIfNot(IsJSArrayIterator(iterator), &throw_bad_receiver);
// Let a be O.[[IteratedObject]].
Node* array =
@@ -3471,19 +3585,23 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// Let index be O.[[ArrayIteratorNextIndex]].
Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
- Node* orig_map =
- LoadObjectField(iterator, JSArrayIterator::kIteratedObjectMapOffset);
Node* array_map = LoadMap(array);
- Label if_isfastarray(this), if_isnotfastarray(this),
- if_isdetached(this, Label::kDeferred);
+ Label if_detached(this, Label::kDeferred);
+
+ Label if_typedarray(this), if_other(this, Label::kDeferred), if_array(this),
+ if_generic(this, Label::kDeferred);
- Branch(WordEqual(orig_map, array_map), &if_isfastarray, &if_isnotfastarray);
+ Node* array_type = LoadInstanceType(array);
+ GotoIf(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_array);
+ Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_typedarray,
+ &if_other);
- BIND(&if_isfastarray);
+ BIND(&if_array);
{
- CSA_ASSERT(
- this, InstanceTypeEqual(LoadMapInstanceType(array_map), JS_ARRAY_TYPE));
+ // We can only handle fast elements here.
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ GotoIfNot(IsFastElementsKind(elements_kind), &if_other);
Node* length = LoadJSArrayLength(array);
@@ -3492,295 +3610,178 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
GotoIfNot(SmiBelow(index, length), &set_done);
+ var_value.Bind(index);
Node* one = SmiConstant(1);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiAdd(index, one));
-
var_done.Bind(FalseConstant());
- Node* elements = LoadElements(array);
- static int32_t kInstanceType[] = {
- JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
- JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
- };
-
- Label packed_object_values(this), holey_object_values(this),
- packed_double_values(this), holey_double_values(this);
- Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &packed_object_values, &holey_object_values,
- &packed_object_values, &holey_object_values, &packed_double_values,
- &holey_double_values, &packed_object_values, &holey_object_values,
- &packed_object_values, &holey_object_values, &packed_double_values,
- &holey_double_values};
-
- Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
- BIND(&packed_object_values);
+ Node* elements = LoadElements(array);
+ Label if_packed(this), if_holey(this), if_packed_double(this),
+ if_holey_double(this), if_unknown_kind(this, Label::kDeferred);
+ int32_t kinds[] = {// Handled by if_packed.
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
+ // Handled by if_holey.
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
+ // Handled by if_packed_double.
+ PACKED_DOUBLE_ELEMENTS,
+ // Handled by if_holey_double.
+ HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
+ &if_packed, &if_packed,
+ // HOLEY_{SMI,}_ELEMENTS
+ &if_holey, &if_holey,
+ // PACKED_DOUBLE_ELEMENTS
+ &if_packed_double,
+ // HOLEY_DOUBLE_ELEMENTS
+ &if_holey_double};
+ Switch(elements_kind, &if_unknown_kind, kinds, labels, arraysize(kinds));
+
+ BIND(&if_packed);
{
var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
- BIND(&packed_double_values);
+ BIND(&if_holey);
{
- Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value));
+ Node* element = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
+ var_value.Bind(element);
+ GotoIfNot(WordEqual(element, TheHoleConstant()),
+ &allocate_entry_if_needed);
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
+ var_value.Bind(UndefinedConstant());
Goto(&allocate_entry_if_needed);
}
- BIND(&holey_object_values);
+ BIND(&if_packed_double);
{
- // Check the no_elements_protector cell, and take the slow path if it's
- // invalid.
- GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
-
- var_value.Bind(UndefinedConstant());
- Node* value = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
- GotoIf(WordEqual(value, TheHoleConstant()), &allocate_entry_if_needed);
- var_value.Bind(value);
+ Node* value = LoadFixedDoubleArrayElement(
+ elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
+ var_value.Bind(AllocateHeapNumberWithValue(value));
Goto(&allocate_entry_if_needed);
}
- BIND(&holey_double_values);
+ BIND(&if_holey_double);
{
- // Check the no_elements_protector cell, and take the slow path if it's
- // invalid.
- GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
-
- var_value.Bind(UndefinedConstant());
+ Label if_hole(this, Label::kDeferred);
Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS,
- &allocate_entry_if_needed);
+ elements, index, MachineType::Float64(), 0, SMI_PARAMETERS, &if_hole);
var_value.Bind(AllocateHeapNumberWithValue(value));
Goto(&allocate_entry_if_needed);
+ BIND(&if_hole);
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
+ var_value.Bind(UndefinedConstant());
+ Goto(&allocate_entry_if_needed);
}
+
+ BIND(&if_unknown_kind);
+ Unreachable();
}
- BIND(&if_isnotfastarray);
+ BIND(&if_other);
{
- Label if_istypedarray(this), if_isgeneric(this);
-
// If a is undefined, return CreateIterResultObject(undefined, true)
GotoIf(IsUndefined(array), &allocate_iterator_result);
- Node* array_type = LoadInstanceType(array);
- Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
- &if_isgeneric);
-
- BIND(&if_isgeneric);
- {
- Label if_wasfastarray(this);
+ Node* length =
+ CallBuiltin(Builtins::kToLength, context,
+ GetProperty(context, array, factory()->length_string()));
- Node* length = nullptr;
- {
- VARIABLE(var_length, MachineRepresentation::kTagged);
- Label if_isarray(this), if_isnotarray(this), done(this);
- Branch(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_isarray,
- &if_isnotarray);
+ GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
- BIND(&if_isarray);
- {
- var_length.Bind(LoadJSArrayLength(array));
+ var_value.Bind(index);
+ StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+ NumberInc(index));
+ var_done.Bind(FalseConstant());
- // Invalidate protector cell if needed
- Branch(WordNotEqual(orig_map, UndefinedConstant()), &if_wasfastarray,
- &done);
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
+ Goto(&if_generic);
+ }
- BIND(&if_wasfastarray);
- {
- Label if_invalid(this, Label::kDeferred);
- // A fast array iterator transitioned to a slow iterator during
- // iteration. Invalidate fast_array_iteration_protector cell to
- // prevent potential deopt loops.
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectMapOffset,
- UndefinedConstant());
- GotoIf(Uint32LessThanOrEqual(
- instance_type,
- Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
- &done);
-
- Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
- StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
- Goto(&done);
- }
- }
+ BIND(&if_generic);
+ {
+ var_value.Bind(GetProperty(context, array, index));
+ Goto(&allocate_entry_if_needed);
+ }
- BIND(&if_isnotarray);
- {
- Node* length =
- GetProperty(context, array, factory()->length_string());
- var_length.Bind(ToLength_Inline(context, length));
- Goto(&done);
- }
+ BIND(&if_typedarray);
+ {
+ Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
- BIND(&done);
- length = var_length.value();
- }
+ Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);
- GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, TaggedIsSmi(index));
- StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
- NumberInc(index));
- var_done.Bind(FalseConstant());
+ GotoIfNot(SmiBelow(index, length), &set_done);
- Branch(
- Uint32LessThanOrEqual(
- instance_type, Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
- &allocate_key_result, &generic_values);
+ var_value.Bind(index);
+ Node* one = SmiConstant(1);
+ StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
+ SmiAdd(index, one));
+ var_done.Bind(FalseConstant());
- BIND(&generic_values);
- {
- var_value.Bind(GetProperty(context, array, index));
- Goto(&allocate_entry_if_needed);
- }
- }
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
- BIND(&if_istypedarray);
- {
- Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_isdetached);
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ Node* elements = LoadElements(array);
+ Node* base_ptr =
+ LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* external_ptr =
+ LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+
+ Label if_unknown_type(this, Label::kDeferred);
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
- Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, TaggedIsSmi(index));
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
- GotoIfNot(SmiBelow(index, length), &set_done);
+ Switch(elements_kind, &if_unknown_type, elements_kinds,
+ elements_kind_labels, arraysize(elements_kinds));
- Node* one = SmiConstant(1);
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kNextIndexOffset, SmiAdd(index, one));
- var_done.Bind(FalseConstant());
+ BIND(&if_unknown_type);
+ Unreachable();
- Node* elements = LoadElements(array);
- Node* base_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged( \
+ data_ptr, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
+ Goto(&allocate_entry_if_needed); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
- static int32_t kInstanceType[] = {
- JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
- JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
- JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE,
- JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE,
- };
-
- Label uint8_values(this), int8_values(this), uint16_values(this),
- int16_values(this), uint32_values(this), int32_values(this),
- float32_values(this), float64_values(this), biguint64_values(this),
- bigint64_values(this);
- Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values, &biguint64_values, &bigint64_values,
- &uint8_values, &uint8_values, &int8_values,
- &uint16_values, &int16_values, &uint32_values,
- &int32_values, &float32_values, &float64_values,
- &biguint64_values, &bigint64_values,
- };
-
- var_done.Bind(FalseConstant());
- Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
-
- BIND(&uint8_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int8_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&uint16_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int16_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&uint32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&float32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&float64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&biguint64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, BIGUINT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&bigint64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, BIGINT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- }
+ BIND(&if_detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
BIND(&set_done);
@@ -3790,17 +3791,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Goto(&allocate_iterator_result);
}
- BIND(&allocate_key_result);
- {
- var_value.Bind(index);
- var_done.Bind(FalseConstant());
- Goto(&allocate_iterator_result);
- }
-
BIND(&allocate_entry_if_needed);
{
- GotoIf(Uint32LessThan(Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE),
- instance_type),
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kValues))),
&allocate_iterator_result);
Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
@@ -3845,9 +3840,261 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
StringConstant(method_name), iterator);
}
+}
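A JavaScript-level sketch of what the rewritten iterator next() above produces, assuming the no-elements protector is still valid (i.e. Array.prototype has not grown indexed elements):

    const it = [, 42][Symbol.iterator]();  // values iterator over a holey array
    it.next();  // { value: undefined, done: false }  (hole reads as undefined)
    it.next();  // { value: 42, done: false }
    it.next();  // { value: undefined, done: true }

    const entries = ['a'].entries();
    entries.next();  // { value: [0, 'a'], done: false }  (key/value pair built on demand)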
+
+namespace {
+
+class ArrayFlattenAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+ Node* FlattenIntoArray(Node* context, Node* target, Node* source,
+ Node* source_length, Node* start, Node* depth,
+ Node* mapper_function = nullptr,
+ Node* this_arg = nullptr) {
+ CSA_ASSERT(this, IsJSReceiver(target));
+ CSA_ASSERT(this, IsJSReceiver(source));
+ CSA_ASSERT(this, IsNumberPositive(source_length));
+ CSA_ASSERT(this, IsNumberPositive(start));
+ CSA_ASSERT(this, IsNumber(depth));
+
+ // 1. Let targetIndex be start.
+ VARIABLE(var_target_index, MachineRepresentation::kTagged, start);
+
+ // 2. Let sourceIndex be 0.
+ VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0));
+
+ // 3. Repeat...
+ Label loop(this, {&var_target_index, &var_source_index}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* const source_index = var_source_index.value();
+ Node* const target_index = var_target_index.value();
+
+ // ...while sourceIndex < sourceLen
+ GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop);
+
+ // a. Let P be ! ToString(sourceIndex).
+ // b. Let exists be ? HasProperty(source, P).
+ CSA_ASSERT(this, SmiGreaterThanOrEqual(source_index, SmiConstant(0)));
+ Node* const exists =
+ HasProperty(source, source_index, context, kHasProperty);
+
+ // c. If exists is true, then
+ Label next(this);
+ GotoIfNot(IsTrue(exists), &next);
+ {
+ // i. Let element be ? Get(source, P).
+ Node* element = GetProperty(context, source, source_index);
+
+ // ii. If mapperFunction is present, then
+ if (mapper_function != nullptr) {
+ CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function),
+ IsCallable(mapper_function)));
+ DCHECK_NOT_NULL(this_arg);
+
+          // 1. Set element to ? Call(mapperFunction, thisArg, « element,
+          //    sourceIndex, source »).
+ element =
+ CallJS(CodeFactory::Call(isolate()), context, mapper_function,
+ this_arg, element, source_index, source);
+ }
+
+ // iii. Let shouldFlatten be false.
+ Label if_flatten_array(this), if_flatten_proxy(this, Label::kDeferred),
+ if_noflatten(this);
+ // iv. If depth > 0, then
+ GotoIfNumberGreaterThanOrEqual(SmiConstant(0), depth, &if_noflatten);
+ // 1. Set shouldFlatten to ? IsArray(element).
+ GotoIf(TaggedIsSmi(element), &if_noflatten);
+ GotoIf(IsJSArray(element), &if_flatten_array);
+ GotoIfNot(IsJSProxy(element), &if_noflatten);
+ Branch(IsTrue(CallRuntime(Runtime::kArrayIsArray, context, element)),
+ &if_flatten_proxy, &if_noflatten);
+
+ BIND(&if_flatten_array);
+ {
+ CSA_ASSERT(this, IsJSArray(element));
+
+ // 1. Let elementLen be ? ToLength(? Get(element, "length")).
+ Node* const element_length =
+ LoadObjectField(element, JSArray::kLengthOffset);
+
+ // 2. Set targetIndex to ? FlattenIntoArray(target, element,
+ // elementLen, targetIndex,
+ // depth - 1).
+ var_target_index.Bind(
+ CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
+ element_length, target_index, NumberDec(depth)));
+ Goto(&next);
+ }
+
+ BIND(&if_flatten_proxy);
+ {
+ CSA_ASSERT(this, IsJSProxy(element));
+
+ // 1. Let elementLen be ? ToLength(? Get(element, "length")).
+ Node* const element_length = ToLength_Inline(
+ context, GetProperty(context, element, LengthStringConstant()));
+
+ // 2. Set targetIndex to ? FlattenIntoArray(target, element,
+ // elementLen, targetIndex,
+ // depth - 1).
+ var_target_index.Bind(
+ CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
+ element_length, target_index, NumberDec(depth)));
+ Goto(&next);
+ }
+
+ BIND(&if_noflatten);
+ {
+ // 1. If targetIndex >= 2^53-1, throw a TypeError exception.
+ Label throw_error(this, Label::kDeferred);
+ GotoIfNumberGreaterThanOrEqual(
+ target_index, NumberConstant(kMaxSafeInteger), &throw_error);
+
+ // 2. Perform ? CreateDataPropertyOrThrow(target,
+ // ! ToString(targetIndex),
+ // element).
+ CallRuntime(Runtime::kCreateDataProperty, context, target,
+ target_index, element);
+
+ // 3. Increase targetIndex by 1.
+ var_target_index.Bind(NumberInc(target_index));
+ Goto(&next);
+
+ BIND(&throw_error);
+ ThrowTypeError(context, MessageTemplate::kFlattenPastSafeLength,
+ source_length, target_index);
+ }
+ }
+ BIND(&next);
+
+ // d. Increase sourceIndex by 1.
+ var_source_index.Bind(NumberInc(source_index));
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ return var_target_index.value();
+ }
+};
+
+} // namespace
+
+// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const source = Parameter(Descriptor::kSource);
+ Node* const source_length = Parameter(Descriptor::kSourceLength);
+ Node* const start = Parameter(Descriptor::kStart);
+ Node* const depth = Parameter(Descriptor::kDepth);
+
+ Return(
+ FlattenIntoArray(context, target, source, source_length, start, depth));
+}
+
+// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const source = Parameter(Descriptor::kSource);
+ Node* const source_length = Parameter(Descriptor::kSourceLength);
+ Node* const start = Parameter(Descriptor::kStart);
+ Node* const depth = Parameter(Descriptor::kDepth);
+ Node* const mapper_function = Parameter(Descriptor::kMapperFunction);
+ Node* const this_arg = Parameter(Descriptor::kThisArg);
+
+ Return(FlattenIntoArray(context, target, source, source_length, start, depth,
+ mapper_function, this_arg));
+}
+
+// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten
+TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const receiver = args.GetReceiver();
+ Node* const depth = args.GetOptionalArgumentValue(0);
+
+ // 1. Let O be ? ToObject(this value).
+ Node* const o = ToObject(context, receiver);
+
+ // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
+ Node* const source_length =
+ ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
+
+ // 3. Let depthNum be 1.
+ VARIABLE(var_depth_num, MachineRepresentation::kTagged, SmiConstant(1));
+
+ // 4. If depth is not undefined, then
+ Label done(this);
+ GotoIf(IsUndefined(depth), &done);
+ {
+ // a. Set depthNum to ? ToInteger(depth).
+ var_depth_num.Bind(ToInteger_Inline(context, depth));
+ Goto(&done);
+ }
+ BIND(&done);
+
+ // 5. Let A be ? ArraySpeciesCreate(O, 0).
+ Node* const constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
+ Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, SmiConstant(0));
+
+ // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, depthNum).
+ CallBuiltin(Builtins::kFlattenIntoArray, context, a, o, source_length,
+ SmiConstant(0), var_depth_num.value());
+
+ // 7. Return A.
+ args.PopAndReturn(a);
+}
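A sketch of the semantics FlattenIntoArray and ArrayPrototypeFlatten implement; note that Array.prototype.flatten was the proposal-era name of what later shipped as Array.prototype.flat:

    [1, [2, [3]]].flatten();          // [1, 2, [3]]  (default depth is 1)
    [1, [2, [3]]].flatten(Infinity);  // [1, 2, 3]
    [1, , [2, , 3]].flatten();        // [1, 2, 3]    (holes dropped by the HasProperty check)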
+
+// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
+TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const receiver = args.GetReceiver();
+ Node* const mapper_function = args.GetOptionalArgumentValue(0);
+
+ // 1. Let O be ? ToObject(this value).
+ Node* const o = ToObject(context, receiver);
+
+ // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
+ Node* const source_length =
+ ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
+
+ // 3. If IsCallable(mapperFunction) is false, throw a TypeError exception.
+ Label if_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(mapper_function), &if_not_callable);
+ GotoIfNot(IsCallable(mapper_function), &if_not_callable);
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ Node* const t = args.GetOptionalArgumentValue(1);
+
+ // 5. Let A be ? ArraySpeciesCreate(O, 0).
+ Node* const constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
+ Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, SmiConstant(0));
+
+ // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, 1, mapperFunction, T).
+ CallBuiltin(Builtins::kFlatMapIntoArray, context, a, o, source_length,
+ SmiConstant(0), SmiConstant(1), mapper_function, t);
+
+ // 7. Return A.
+ args.PopAndReturn(a);
- BIND(&if_isdetached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+ BIND(&if_not_callable);
+ { ThrowTypeError(context, MessageTemplate::kMapperFunctionNonCallable); }
}
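And a corresponding sketch for ArrayPrototypeFlatMap, including the mapper check at the end:

    [1, 2, 3].flatMap(x => [x, x * 2]);  // [1, 2, 2, 4, 3, 6]  (results flattened one level)
    [1, 2, 3].flatMap(x => x * 2);       // [2, 4, 6]           (non-array results kept as-is)
    [1, 2, 3].flatMap();                 // throws TypeError (mapper is not callable)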
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 67ac51480c..aabd4bab6e 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -73,8 +73,8 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> receiver() { return receiver_; }
Node* new_target() { return new_target_; }
TNode<IntPtrT> argc() { return argc_; }
- Node* o() { return o_; }
- Node* len() { return len_; }
+ TNode<JSReceiver> o() { return o_; }
+ TNode<Number> len() { return len_; }
Node* callbackfn() { return callbackfn_; }
Node* this_arg() { return this_arg_; }
Node* k() { return k_.value(); }
@@ -95,7 +95,8 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
ForEachDirection direction = ForEachDirection::kForward);
void InitIteratingArrayBuiltinLoopContinuation(
TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to);
+ Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
+ TNode<Number> len, Node* to);
void GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
@@ -112,13 +113,15 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void VisitAllTypedArrayElements(Node* array_buffer,
const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction);
+ Label* detached, ForEachDirection direction,
+ TNode<JSTypedArray> typed_array);
void VisitAllFastElementsOneKind(ElementsKind kind,
const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode,
ForEachDirection direction,
- MissingPropertyMode missing_property_mode);
+ MissingPropertyMode missing_property_mode,
+ TNode<Smi> length);
void HandleFastElements(const CallResultProcessor& processor,
const PostLoopAction& action, Label* slow,
@@ -131,12 +134,12 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void GenerateArraySpeciesCreate();
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void GenerateArraySpeciesCreate(SloppyTNode<Smi> len);
+ void GenerateArraySpeciesCreate(TNode<Number> len);
Node* callbackfn_ = nullptr;
- Node* o_ = nullptr;
+ TNode<JSReceiver> o_;
Node* this_arg_ = nullptr;
- Node* len_ = nullptr;
+ TNode<Number> len_;
TNode<Context> context_;
TNode<Object> receiver_;
Node* new_target_ = nullptr;
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index f400e824f0..f57c7d39ca 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -78,7 +78,7 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
}
// Returns |false| if not applicable.
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Handle<Object> receiver,
BuiltinArguments* args,
@@ -128,9 +128,8 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
return true;
}
-MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
- Handle<JSFunction> function,
- BuiltinArguments args) {
+V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
+ Isolate* isolate, Handle<JSFunction> function, BuiltinArguments args) {
HandleScope handleScope(isolate);
int argc = args.length() - 1;
ScopedVector<Handle<Object>> argv(argc);
@@ -240,75 +239,6 @@ BUILTIN(ArrayUnshift) {
return Smi::FromInt(new_length);
}
-BUILTIN(ArraySlice) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- int len = -1;
- int relative_start = 0;
- int relative_end = 0;
-
- if (receiver->IsJSArray()) {
- DisallowHeapAllocation no_gc;
- JSArray* array = JSArray::cast(*receiver);
- if (V8_UNLIKELY(!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsSpeciesLookupChainIntact() ||
- // If this is a subclass of Array, then call out to JS
- !array->HasArrayPrototype(isolate))) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- len = Smi::ToInt(array->length());
- } else if (receiver->IsJSObject() &&
- JSSloppyArgumentsObject::GetSloppyArgumentsLength(
- isolate, Handle<JSObject>::cast(receiver), &len)) {
- // Array.prototype.slice.call(arguments, ...) is quite a common idiom
- // (notably more than 50% of invocations in Web apps).
- // Treat it in C++ as well.
- DCHECK(JSObject::cast(*receiver)->HasFastElements() ||
- JSObject::cast(*receiver)->HasFastArgumentsElements());
- } else {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- DCHECK_LE(0, len);
- int argument_count = args.length() - 1;
- // Note carefully chosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- relative_start = 0;
- relative_end = len;
- if (argument_count > 0) {
- DisallowHeapAllocation no_gc;
- if (!ClampedToInteger(isolate, args[1], &relative_start)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- if (argument_count > 1) {
- Object* end_arg = args[2];
- // slice handles the end_arg specially
- if (end_arg->IsUndefined(isolate)) {
- relative_end = len;
- } else if (!ClampedToInteger(isolate, end_arg, &relative_end)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- }
- }
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
- uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
- uint32_t actual_end =
- (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
-
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- ElementsAccessor* accessor = object->GetElementsAccessor();
- return *accessor->Slice(object, actual_start, actual_end);
-}
-
BUILTIN(ArraySplice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
@@ -317,7 +247,7 @@ BUILTIN(ArraySplice) {
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
- !isolate->IsSpeciesLookupChainIntact())) {
+ !isolate->IsArraySpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -403,7 +333,7 @@ class ArrayConcatVisitor {
~ArrayConcatVisitor() { clear_storage(); }
- MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
+ V8_WARN_UNUSED_RESULT bool visit(uint32_t i, Handle<Object> elm) {
uint32_t index = index_offset_ + i;
if (i >= JSObject::kMaxElementCount - index_offset_) {
@@ -481,7 +411,7 @@ class ArrayConcatVisitor {
return array;
}
- MUST_USE_RESULT MaybeHandle<JSReceiver> ToJSReceiver() {
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> ToJSReceiver() {
DCHECK(!is_fixed_array());
Handle<JSReceiver> result = Handle<JSReceiver>::cast(storage_);
Handle<Object> length =
@@ -1187,7 +1117,7 @@ BUILTIN(ArrayConcat) {
// Avoid a real species read to avoid extra lookups to the array constructor
if (V8_LIKELY(receiver->IsJSArray() &&
Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsSpeciesLookupChainIntact())) {
+ isolate->IsArraySpeciesLookupChainIntact())) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 4f9078b4b6..9c77a0047d 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -23,17 +23,6 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 24.1 ArrayBuffer Objects
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
-BUILTIN(ArrayBufferConstructor) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
- DCHECK(*target == target->native_context()->array_buffer_fun() ||
- *target == target->native_context()->shared_array_buffer_fun());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->name(), isolate)));
-}
-
namespace {
Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
@@ -62,24 +51,30 @@ Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
} // namespace
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
-BUILTIN(ArrayBufferConstructor_ConstructStub) {
+// ES #sec-arraybuffer-constructor
+BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> length = args.atOrUndefined(isolate, 1);
DCHECK(*target == target->native_context()->array_buffer_fun() ||
*target == target->native_context()->shared_array_buffer_fun());
-
- Handle<Object> number_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
- Object::ToInteger(isolate, length));
- if (number_length->Number() < 0.0) {
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->Name(), isolate)));
+ } else { // [[Construct]]
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+
+ return ConstructBuffer(isolate, target, new_target, number_length, true);
}
-
- return ConstructBuffer(isolate, target, new_target, number_length, true);
}
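The merged [[Call]]/[[Construct]] handling above corresponds to the following observable behavior:

    new ArrayBuffer(8);   // ok: 8-byte buffer
    ArrayBuffer(8);       // throws TypeError (constructor requires 'new')
    new ArrayBuffer(-1);  // throws RangeError (invalid buffer length)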
// This is a helper to construct an ArrayBuffer with uninitialized memory.
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 7958afba00..073c96a2e0 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
@@ -44,7 +44,7 @@ void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
// When debugging, we need to link from the {generator} to the
// {outer_promise} of the async function/generator.
Label done(this);
- GotoIfNot(IsDebugActive(), &done);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &done);
CallRuntime(Runtime::kSetProperty, native_context, generator,
LoadRoot(Heap::kgenerator_outer_promise_symbolRootIndex),
outer_promise, SmiConstant(LanguageMode::kStrict));
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index b78747aaa9..290252da62 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -29,13 +29,13 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
: AsyncBuiltinsAssembler(state) {}
inline Node* TaggedIsAsyncGenerator(Node* tagged_object) {
- Node* if_notsmi = TaggedIsNotSmi(tagged_object);
- return Select(if_notsmi,
- [=]() {
- return HasInstanceType(tagged_object,
- JS_ASYNC_GENERATOR_OBJECT_TYPE);
- },
- [=]() { return if_notsmi; }, MachineRepresentation::kBit);
+ TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object);
+ return Select<BoolT>(if_notsmi,
+ [=] {
+ return HasInstanceType(
+ tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ },
+ [=] { return if_notsmi; });
}
inline Node* LoadGeneratorState(Node* const generator) {
return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
@@ -518,11 +518,34 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
done);
}
-  // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ // We know that {iter_result} itself doesn't have any "then" property and
+ // we also know that the [[Prototype]] of {iter_result} is the intrinsic
+ // %ObjectPrototype%. So we can skip the [[Resolve]] logic here completely
+ // and directly call into the FulfillPromise operation if we can prove
+ // that the %ObjectPrototype% also doesn't have any "then" property. This
+ // is guarded by the Promise#then protector.
+ Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
+ GotoIfForceSlowPath(&if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
+
+ BIND(&if_fast);
+ {
+ // Skip the "then" on {iter_result} and directly fulfill the {promise}
+ // with the {iter_result}.
+ CallBuiltin(Builtins::kFulfillPromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
+
+ BIND(&if_slow);
+ {
+    // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
+ BIND(&return_promise);
Return(promise);
}
@@ -548,11 +571,42 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
+ // Mark the generator as "awaiting".
SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise,
- Builtins::kAsyncGeneratorYieldFulfill,
- Builtins::kAsyncGeneratorAwaitReject, is_caught);
- Return(UndefinedConstant());
+
+ // We can skip the creation of a temporary promise and the whole
+ // [[Resolve]] logic if we already know that the {value} that's
+ // being yielded is a primitive, as in that case we would immediately
+  // fulfill the temporary promise anyway and schedule a fulfill
+ // reaction job. This gives a nice performance boost for async
+ // generators that yield only primitives, e.g. numbers or strings.
+ Label if_primitive(this), if_generic(this);
+ GotoIfForceSlowPath(&if_generic);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_generic);
+ GotoIf(TaggedIsSmi(value), &if_primitive);
+ Branch(IsJSReceiver(value), &if_generic, &if_primitive);
+
+ BIND(&if_generic);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorYieldFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
+ Return(UndefinedConstant());
+ }
+
+ BIND(&if_primitive);
+ {
+ // For primitive {value}s we can skip the allocation of the temporary
+ // promise and the resolution of that, and directly allocate the fulfill
+ // reaction job.
+ Node* const microtask = AllocatePromiseReactionJobTask(
+ Heap::kPromiseFulfillReactionJobTaskMapRootIndex, context, value,
+ HeapConstant(Builtins::CallableFor(
+ isolate(), Builtins::kAsyncGeneratorYieldFulfill)
+ .code()),
+ generator);
+ TailCallBuiltin(Builtins::kEnqueueMicrotask, context, microtask);
+ }
}
TF_BUILTIN(AsyncGeneratorYieldFulfill, AsyncGeneratorBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index fdbd3937d4..6c04c9dcb7 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -13,29 +13,28 @@ namespace internal {
BUILTIN(BigIntConstructor) {
HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
-
- if (value->IsJSReceiver()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
- ToPrimitiveHint::kNumber));
- }
-
- if (value->IsNumber()) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
- } else {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ if (value->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
+ ToPrimitiveHint::kNumber));
+ }
+
+ if (value->IsNumber()) {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
+ } else {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ }
+ } else { // [[Construct]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->BigInt_string()));
}
}
-BUILTIN(BigIntConstructor_ConstructStub) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor,
- isolate->factory()->BigInt_string()));
-}
-
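The folded BigInt constructor gives the following JavaScript-level behavior:

    BigInt(42);      // 42n
    BigInt(1.5);     // throws RangeError (1.5 is not an integer)
    new BigInt(42);  // throws TypeError (BigInt is not a constructor)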
BUILTIN(BigIntAsUintN) {
HandleScope scope(isolate);
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index dabd803dc1..5dc42d506f 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -11,28 +11,27 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 19.3 Boolean Objects
+// ES #sec-boolean-objects
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
+// ES #sec-boolean-constructor
BUILTIN(BooleanConstructor) {
HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(value->BooleanValue());
-}
-
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
-BUILTIN(BooleanConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->boolean_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue()));
- return *result;
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(value->BooleanValue());
+ } else { // [[Construct]]
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->boolean_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSValue>::cast(result)->set_value(
+ isolate->heap()->ToBoolean(value->BooleanValue()));
+ return *result;
+ }
}
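Likewise for the merged Boolean constructor:

    Boolean(0);          // false (primitive)
    new Boolean(0);      // wrapper object holding false
    !!(new Boolean(0));  // true  (wrapper objects are always truthy)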
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 7443202c98..34c88670f4 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -268,91 +268,88 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
Node* target, Node* new_target, Node* spread, Node* args_count,
Node* context) {
- Label if_done(this), if_holey(this), if_runtime(this, Label::kDeferred);
+ Label if_smiorobject(this), if_double(this),
+ if_generic(this, Label::kDeferred);
- VARIABLE(spread_result, MachineRepresentation::kTagged, spread);
+ VARIABLE(var_length, MachineRepresentation::kWord32);
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ VARIABLE(var_elements_kind, MachineRepresentation::kWord32);
- GotoIf(TaggedIsSmi(spread), &if_runtime);
+ GotoIf(TaggedIsSmi(spread), &if_generic);
Node* spread_map = LoadMap(spread);
- GotoIfNot(IsJSArrayMap(spread_map), &if_runtime);
+ GotoIfNot(IsJSArrayMap(spread_map), &if_generic);
+
+ // Check that we have the original Array.prototype.
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_generic);
- // Check that we have the original ArrayPrototype.
- GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_runtime);
+ // Check that there are no elements on the Array.prototype chain.
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
- // Check that the ArrayPrototype hasn't been modified in a way that would
+ // Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
- GotoIfNot(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- &if_runtime);
-
- // Check that the map of the initial array iterator hasn't changed.
- TNode<Context> native_context = LoadNativeContext(context);
- GotoIfNot(HasInitialArrayIteratorPrototypeMap(native_context), &if_runtime);
-
- Node* kind = LoadMapElementsKind(spread_map);
-
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
- STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
-
- GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &if_runtime);
- Branch(Word32And(kind, Int32Constant(1)), &if_holey, &if_done);
-
- // Check the NoElementsProtector cell for holey arrays.
- BIND(&if_holey);
- { Branch(IsNoElementsProtectorCellInvalid(), &if_runtime, &if_done); }
-
- BIND(&if_runtime);
+ GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorInvalid)),
+ &if_generic);
+
+ // The fast-path accesses the {spread} elements directly.
+ Node* spread_kind = LoadMapElementsKind(spread_map);
+ var_elements_kind.Bind(spread_kind);
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(spread, JSArray::kLengthOffset));
+ var_elements.Bind(LoadObjectField(spread, JSArray::kElementsOffset));
+
+ // Check elements kind of {spread}.
+ GotoIf(Int32LessThan(spread_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_smiorobject);
+ Branch(Int32GreaterThan(spread_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_generic, &if_double);
+
+ BIND(&if_generic);
{
- Node* spread_iterable = LoadContextElement(LoadNativeContext(context),
- Context::SPREAD_ITERABLE_INDEX);
- spread_result.Bind(CallJS(CodeFactory::Call(isolate()), context,
- spread_iterable, UndefinedConstant(), spread));
- CSA_ASSERT(this, IsJSArray(spread_result.value()));
- Goto(&if_done);
+ Label if_iterator_fn_not_callable(this, Label::kDeferred);
+ Node* iterator_fn = GetProperty(context, spread, IteratorSymbolConstant());
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+ GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
+ Node* list =
+ CallBuiltin(Builtins::kIterableToList, context, spread, iterator_fn);
+ CSA_ASSERT(this, IsJSArray(list));
+ Node* list_kind = LoadMapElementsKind(LoadMap(list));
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset));
+ var_elements.Bind(LoadObjectField(list, JSArray::kElementsOffset));
+ var_elements_kind.Bind(list_kind);
+ Branch(Int32LessThan(list_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_smiorobject, &if_double);
+
+ BIND(&if_iterator_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
}
- BIND(&if_done);
+ BIND(&if_smiorobject);
{
- // The result from if_runtime can be an array of doubles.
- Label if_not_double(this), if_double(this);
- Node* elements =
- LoadObjectField(spread_result.value(), JSArray::kElementsOffset);
- Node* length = LoadAndUntagToWord32ObjectField(spread_result.value(),
- JSArray::kLengthOffset);
-
- Node* kind = LoadMapElementsKind(LoadMap(elements));
- CSA_ASSERT(this, Int32LessThanOrEqual(
- kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)));
-
- Branch(Int32GreaterThan(kind, Int32Constant(HOLEY_ELEMENTS)), &if_double,
- &if_not_double);
+ Node* const elements = var_elements.value();
+ Node* const length = var_length.value();
- BIND(&if_not_double);
- {
- if (new_target == nullptr) {
- Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, elements, length);
- } else {
- Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count,
- elements, length);
- }
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count, elements,
+ length);
}
+ }
- BIND(&if_double);
- {
- CallOrConstructDoubleVarargs(target, new_target, elements, length,
- args_count, context, kind);
- }
+ BIND(&if_double);
+ {
+ Node* const elements_kind = var_elements_kind.value();
+ Node* const elements = var_elements.value();
+ Node* const length = var_length.value();
+
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context, elements_kind);
}
}
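A sketch of when the rewritten CallOrConstructWithSpread stays on the fast path versus taking the generic iterator protocol, assuming the array-iterator and no-elements protectors are intact:

    Math.max(...[1, 2, 3]);           // fast path: plain JSArray with fast Smi elements
    Math.max(...[1.5, 2.5]);          // fast path for PACKED_DOUBLE_ELEMENTS (if_double)
    Math.max(...new Set([1, 2, 3]));  // generic path: looks up Symbol.iterator, builds a list
    Math.max(...{});                  // throws TypeError (object is not iterable)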
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 563703707c..5c3f263746 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -6,7 +6,7 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/hash-table.h"
namespace v8 {
@@ -237,6 +237,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
TNode<Map> original_fast_js_array_map = LoadMap(fast_jsarray);
#endif
Label exit(this), if_doubles(this), if_smiorobjects(this);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &exit);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
&if_doubles);
BIND(&if_smiorobjects);
@@ -333,8 +334,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
[=] {
return AllocateJSCollectionSlow(context, constructor,
new_target);
- },
- MachineRepresentation::kTagged);
+ });
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
@@ -456,7 +456,7 @@ TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
return Select<IntPtrT>(
is_fast_jsarray,
[=] { return SmiUntag(LoadFastJSArrayLength(CAST(initial_entries))); },
- [=] { return IntPtrConstant(0); }, MachineType::PointerRepresentation());
+ [=] { return IntPtrConstant(0); });
}
void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
@@ -492,10 +492,9 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<Object> elements, TNode<IntPtrT> index) {
- TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
+ TNode<Object> element = LoadFixedArrayElement(elements, index);
return Select<Object>(IsTheHole(element), [=] { return UndefinedConstant(); },
- [=] { return element; },
- MachineRepresentation::kTagged);
+ [=] { return element; });
}
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
@@ -1060,12 +1059,12 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
std::function<void(Node*, Label*, Label*)> key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
- Node* const number_of_buckets = SmiUntag(
- LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex));
+ Node* const number_of_buckets = SmiUntag(CAST(
+ LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex)));
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const first_entry = SmiUntag(LoadFixedArrayElement(
- table, bucket, CollectionType::kHashTableStartIndex * kPointerSize));
+ Node* const first_entry = SmiUntag(CAST(LoadFixedArrayElement(
+ table, bucket, CollectionType::kHashTableStartIndex * kPointerSize)));
// Walk the bucket chain.
Node* entry_start;
@@ -1088,10 +1087,10 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
UintPtrLessThan(
var_entry.value(),
SmiUntag(SmiAdd(
- LoadFixedArrayElement(table,
- CollectionType::kNumberOfElementsIndex),
- LoadFixedArrayElement(
- table, CollectionType::kNumberOfDeletedElementsIndex)))));
+ CAST(LoadFixedArrayElement(
+ table, CollectionType::kNumberOfElementsIndex)),
+ CAST(LoadFixedArrayElement(
+ table, CollectionType::kNumberOfDeletedElementsIndex))))));
// Compute the index of the entry relative to kHashTableStartIndex.
entry_start =
@@ -1108,10 +1107,10 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
BIND(&continue_next_entry);
// Load the index of the next entry in the bucket chain.
- var_entry.Bind(SmiUntag(LoadFixedArrayElement(
+ var_entry.Bind(SmiUntag(CAST(LoadFixedArrayElement(
table, entry_start,
(CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
- kPointerSize)));
+ kPointerSize))));
Goto(&loop);
}
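
For readers not steeped in OrderedHashTable internals, the bucket and chain arithmetic in FindOrderedHashTableEntry above boils down to a power-of-two bucket mask plus a per-entry chain link stored in the same backing array. A simplified sketch in plain C++, with the flat FixedArray layout replaced by separate vectors and all names invented for illustration:

#include <cstddef>
#include <cstdint>
#include <vector>

struct FlatOrderedHashTable {
  std::size_t number_of_buckets;        // always a power of two
  std::vector<std::intptr_t> buckets;   // head entry per bucket, -1 if empty
  std::vector<std::intptr_t> chain;     // next entry per entry, -1 at the end
  std::vector<std::intptr_t> keys;      // keys reduced to integers here
};

// Mirrors WordAnd(hash, number_of_buckets - 1) and the kChainOffset walk.
std::intptr_t FindEntry(const FlatOrderedHashTable& table, std::uintptr_t hash,
                        std::intptr_t key) {
  std::size_t bucket = hash & (table.number_of_buckets - 1);
  for (std::intptr_t entry = table.buckets[bucket]; entry != -1;
       entry = table.chain[entry]) {
    if (table.keys[entry] == key) return entry;  // entry_found
  }
  return -1;  // not_found
}

The bound check against number_of_elements + number_of_deleted in the assembler version exists because chain indices live in the same FixedArray as the entries; the sketch's -1 sentinel plays the role of that exit condition.
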
@@ -1362,8 +1361,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(
- LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(
+ LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex))));
STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1378,8 +1377,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kMapGrow, context, receiver);
table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(LoadFixedArrayElement(
- table_var.value(), OrderedHashMap::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ table_var.value(), OrderedHashMap::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashMap::kNumberOfElementsOffset)));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
@@ -1518,7 +1517,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
&add_entry);
// Otherwise, go to runtime to compute the hash code.
- entry_start_position_or_hash.Bind(SmiUntag((CallGetOrCreateHashRaw(key))));
+ entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key)));
Goto(&add_entry);
}
@@ -1528,8 +1527,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(
- LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(
+ LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex))));
STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1544,8 +1543,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kSetGrow, context, receiver);
table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(LoadFixedArrayElement(
- table_var.value(), OrderedHashSet::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ table_var.value(), OrderedHashSet::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashSet::kNumberOfElementsOffset)));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
@@ -2222,7 +2221,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
TNode<IntPtrT> key_index;
{
key_index = KeyIndexFromEntry(var_entry.value());
- TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
+ TNode<Object> entry_key = LoadFixedArrayElement(table, key_index);
key_compare(entry_key, &if_found);
@@ -2271,15 +2270,15 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
TNode<Object> table, int offset) {
- TNode<IntPtrT> number_of_elements = SmiUntag(
- LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex));
+ TNode<IntPtrT> number_of_elements = SmiUntag(CAST(
+ LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex)));
return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
TNode<Object> table, int offset) {
- TNode<IntPtrT> number_of_deleted = SmiUntag(LoadFixedArrayElement(
- table, ObjectHashTable::kNumberOfDeletedElementsIndex));
+ TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(LoadFixedArrayElement(
+ table, ObjectHashTable::kNumberOfDeletedElementsIndex)));
return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
}
@@ -2291,7 +2290,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::LoadTable(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
TNode<Object> table) {
return SmiUntag(
- LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex));
+ CAST(LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex)));
}
TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
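
The repeated "check we have enough space" blocks in MapPrototypeSet and SetPrototypeAdd above, and the InsufficientCapacityToAdd helper whose signature closes this hunk, all revolve around the same load-factor arithmetic. A rough sketch in plain C++, under the assumption spelled out by the STATIC_ASSERTs (kLoadFactor == 2); the exact grow trigger in the real helpers may count deleted entries slightly differently:

#include <cstddef>

constexpr std::size_t kLoadFactor = 2;

// A table with B buckets holds at most kLoadFactor * B entries, live plus
// deleted, so adding one more forces a grow (Runtime::kMapGrow / kSetGrow).
bool NeedsToGrow(std::size_t number_of_buckets, std::size_t number_of_elements,
                 std::size_t number_of_deleted) {
  std::size_t capacity = number_of_buckets * kLoadFactor;  // WordShl(buckets, 1)
  std::size_t occupancy = number_of_elements + number_of_deleted;
  return occupancy + 1 > capacity;
}
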
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 75ad302d3d..138db2a422 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -112,13 +112,11 @@ void InstallContextFunction(Handle<JSObject> target, const char* name,
Handle<Object> context_name) {
Factory* const factory = target->GetIsolate()->factory();
- Handle<Code> code(target->GetIsolate()->builtins()->builtin(builtin_id));
-
Handle<String> name_string =
Name::ToFunctionName(factory->InternalizeUtf8String(name))
.ToHandleChecked();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
- name_string, code, builtin_id, i::LanguageMode::kSloppy);
+ name_string, builtin_id, i::LanguageMode::kSloppy);
Handle<JSFunction> fun = factory->NewFunction(args);
fun->shared()->set_native(true);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 945fb4394b..e16945ba26 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -99,13 +99,12 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* const compiler_hints = LoadObjectField(
- shared_function_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
- Node* const function_map_index =
- IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
- compiler_hints),
- IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
+ Node* const flags =
+ LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset,
+ MachineType::Uint32());
+ Node* const function_map_index = IntPtrAdd(
+ DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
+ IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
CSA_ASSERT(this, UintPtrLessThanOrEqual(
function_map_index,
IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
@@ -636,166 +635,163 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
return result;
}
+// ES #sec-object-constructor
TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
int const kValueArg = 0;
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
-
- Label return_to_object(this);
-
- GotoIf(Word32And(IsNotUndefined(value), IsNotNull(value)), &return_to_object);
-
- args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
-
- BIND(&return_to_object);
- args.PopAndReturn(ToObject(context, value));
-}
-
-TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- int const kValueArg = 0;
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
- Node* value = args.GetOptionalArgumentValue(kValueArg);
-
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_subclass(this, Label::kDeferred), if_notsubclass(this),
+ return_result(this);
+ GotoIf(IsUndefined(new_target), &if_notsubclass);
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Branch(WordEqual(new_target, target), &if_notsubclass, &if_subclass);
- CSA_ASSERT(this, IsNotUndefined(new_target));
+ BIND(&if_subclass);
+ {
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
- Label return_to_object(this);
+ BIND(&if_notsubclass);
+ {
+ Label if_newobject(this, Label::kDeferred), if_toobject(this);
- GotoIf(Word32And(WordEqual(target, new_target),
- Word32And(IsNotUndefined(value), IsNotNull(value))),
- &return_to_object);
- args.PopAndReturn(EmitFastNewObject(context, target, new_target));
+ Node* value_index = IntPtrConstant(kValueArg);
+ GotoIf(UintPtrGreaterThanOrEqual(value_index, argc), &if_newobject);
+ Node* value = args.AtIndex(value_index);
+ GotoIf(IsNull(value), &if_newobject);
+ Branch(IsUndefined(value), &if_newobject, &if_toobject);
- BIND(&return_to_object);
- args.PopAndReturn(ToObject(context, value));
+ BIND(&if_newobject);
+ {
+ Node* result = EmitCreateEmptyObjectLiteral(context);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
+
+ BIND(&if_toobject);
+ {
+ Node* result = CallBuiltin(Builtins::kToObject, context, value);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
+ }
+
+ BIND(&return_result);
+ args.PopAndReturn(var_result.value());
}
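
The merged ObjectConstructor above folds the old [[Call]] and [[Construct]] stubs into one builtin, so the interesting part is the three-way dispatch. A condensed decision function in plain C++ (not CSA; parameter names are invented, and the tagged-value plumbing is reduced to booleans):

enum class ObjectCtorAction {
  kFastNewObject,       // subclass construction: respect new.target's map
  kEmptyObjectLiteral,  // no usable value: {} with the initial Object map
  kToObject,            // otherwise coerce the argument
};

ObjectCtorAction ClassifyObjectConstructorCall(bool new_target_is_undefined,
                                               bool new_target_is_target,
                                               bool has_value_arg,
                                               bool value_is_null_or_undefined) {
  if (!new_target_is_undefined && !new_target_is_target)
    return ObjectCtorAction::kFastNewObject;
  if (!has_value_arg || value_is_null_or_undefined)
    return ObjectCtorAction::kEmptyObjectLiteral;
  return ObjectCtorAction::kToObject;
}

This matches the labels above: if_subclass maps to kFastNewObject, if_newobject to kEmptyObjectLiteral, and if_toobject to kToObject.
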
+// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
+ Node* context = Parameter(BuiltinDescriptor::kContext);
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Label return_zero(this);
+ // 1. If no arguments were passed to this function invocation, let n be +0.
+ VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0));
+ Label if_nloaded(this, &var_n);
+ GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_nloaded);
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
+ // 2. Else,
+ // a. Let prim be ? ToNumeric(value).
+ // b. If Type(prim) is BigInt, let n be the Number value for prim.
+ // c. Otherwise, let n be prim.
+ Node* value = args.AtIndex(0);
+ var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber));
+ Goto(&if_nloaded);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- args.PopAndReturn(
- ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
+ BIND(&if_nloaded);
+ {
+ // 3. If NewTarget is undefined, return n.
+ Node* n_value = var_n.value();
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Label return_n(this), constructnumber(this, Label::kDeferred);
+ Branch(IsUndefined(new_target), &return_n, &constructnumber);
- BIND(&return_zero);
- args.PopAndReturn(SmiConstant(0));
+ BIND(&return_n);
+ { args.PopAndReturn(n_value); }
+
+ BIND(&constructnumber);
+ {
+ // 4. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+      //    "%NumberPrototype%", « [[NumberData]] »).
+ // 5. Set O.[[NumberData]] to n.
+ // 6. Return O.
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, n_value);
+ args.PopAndReturn(result);
+ }
+ }
}
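
Likewise for NumberConstructor: one builtin now covers both the plain call and construction. A control-flow sketch in plain C++; the to_numeric_of_arg0 parameter stands in for the result of ToNumber(context, value, BigIntHandling::kConvertToNumber), which is what makes Number(1n) yield the Number 1 rather than a BigInt:

struct NumberCtorResult {
  double n;              // the computed numeric value
  bool wrap_in_jsvalue;  // true when constructing (new Number(x) or a subclass)
};

NumberCtorResult NumberConstructorSketch(bool has_args,
                                         double to_numeric_of_arg0,
                                         bool new_target_is_undefined) {
  // Steps 1-2: no arguments means +0, otherwise ToNumeric with BigInts
  // converted to Numbers.
  double n = has_args ? to_numeric_of_arg0 : 0.0;
  // Step 3 returns n for a plain call; steps 4-6 allocate a wrapper via
  // FastNewObject and store n in its [[NumberData]] slot.
  return {n, !new_target_is_undefined};
}
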
-TF_BUILTIN(NumberConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+// https://tc39.github.io/ecma262/#sec-string-constructor
+TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
-
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Label wrap(this);
-
- VARIABLE(var_result, MachineRepresentation::kTagged, SmiConstant(0));
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &wrap);
- var_result.Bind(
- ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
- Goto(&wrap);
-
- BIND(&wrap);
- Node* result = EmitFastNewObject(context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, var_result.value());
- args.PopAndReturn(result);
-}
-
-Node* ConstructorBuiltinsAssembler::EmitConstructString(Node* argc,
- CodeStubArguments& args,
- Node* context,
- bool convert_symbol) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- Label return_empty_string(this), to_string(this),
- check_symbol(this, Label::kDeferred), done(this);
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_empty_string);
-
- Node* argument = args.AtIndex(0);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
- GotoIf(TaggedIsSmi(argument), &to_string);
+ // 1. If no arguments were passed to this function invocation, let s be "".
+ VARIABLE(var_s, MachineRepresentation::kTagged, EmptyStringConstant());
+ Label if_sloaded(this, &var_s);
+ GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_sloaded);
- Node* instance_type = LoadInstanceType(argument);
+ // 2. Else,
+ // a. If NewTarget is undefined [...]
+ Node* value = args.AtIndex(0);
+ Label if_tostring(this, &var_s);
+ GotoIfNot(IsUndefined(new_target), &if_tostring);
- Label* non_string = convert_symbol ? &check_symbol : &to_string;
- GotoIfNot(IsStringInstanceType(instance_type), non_string);
+ // 2a. [...] and Type(value) is Symbol, return SymbolDescriptiveString(value).
+ GotoIf(TaggedIsSmi(value), &if_tostring);
+ GotoIfNot(IsSymbol(value), &if_tostring);
{
- var_result.Bind(argument);
- Goto(&done);
+ Node* result =
+ CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+ args.PopAndReturn(result);
}
- if (convert_symbol) {
- BIND(&check_symbol);
- GotoIfNot(IsSymbolInstanceType(instance_type), &to_string);
- {
- var_result.Bind(
- CallRuntime(Runtime::kSymbolDescriptiveString, context, argument));
- Goto(&done);
- }
- }
-
- BIND(&to_string);
+ // 2b. Let s be ? ToString(value).
+ BIND(&if_tostring);
{
- var_result.Bind(ToString(context, argument));
- Goto(&done);
+ var_s.Bind(CallBuiltin(Builtins::kToString, context, value));
+ Goto(&if_sloaded);
}
- BIND(&return_empty_string);
+ // 3. If NewTarget is undefined, return s.
+ BIND(&if_sloaded);
{
- var_result.Bind(EmptyStringConstant());
- Goto(&done);
- }
-
- BIND(&done);
- return var_result.value();
-}
-
-TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- args.PopAndReturn(EmitConstructString(argc, args, context, true));
-}
-
-TF_BUILTIN(StringConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* s_value = var_s.value();
+ Label return_s(this), constructstring(this, Label::kDeferred);
+ Branch(IsUndefined(new_target), &return_s, &constructstring);
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
+ BIND(&return_s);
+ { args.PopAndReturn(s_value); }
- Node* string = EmitConstructString(argc, args, context, false);
- Node* result = EmitFastNewObject(context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, string);
- args.PopAndReturn(result);
+ BIND(&constructstring);
+ {
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, s_value);
+ args.PopAndReturn(result);
+ }
+ }
}
} // namespace internal
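
StringConstructor gets the same treatment, and the subtle case is Symbols: a plain call String(sym) returns the symbol's descriptive string, while every other path funnels through ToString (which throws for Symbols) and, when new.target is defined, wraps the result in a JSValue. A decision sketch in plain C++ with invented names:

enum class StringCtorAction {
  kEmptyString,              // no arguments: s is ""
  kSymbolDescriptiveString,  // plain call with a Symbol argument
  kToString,                 // everything else: s is ToString(value)
};

StringCtorAction ClassifyStringConstructorCall(bool has_args,
                                               bool new_target_is_undefined,
                                               bool arg_is_symbol) {
  if (!has_args) return StringCtorAction::kEmptyString;
  if (new_target_is_undefined && arg_is_symbol)
    return StringCtorAction::kSymbolDescriptiveString;
  return StringCtorAction::kToString;
}

Wrapping into a JSValue (the constructstring label) happens after s is computed and only when new.target is not undefined, exactly as in NumberConstructor.
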
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index f6d71882bc..820970961b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -36,9 +36,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
Label* call_runtime);
- Node* EmitConstructString(Node* argc, CodeStubArguments& args, Node* context,
- bool convert_symbol);
-
private:
Node* NotHasBoilerplate(Node* literal_site);
Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index dc3e8d53c4..63a6dc0a91 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -126,12 +126,12 @@ TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) {
}
TF_BUILTIN(ToNumeric, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
- Return(Select(IsNumber(input), [=] { return input; },
- [=] { return NonNumberToNumeric(context, input); },
- MachineRepresentation::kTagged));
+ Return(Select<Numeric>(
+ IsNumber(input), [=] { return CAST(input); },
+ [=] { return NonNumberToNumeric(context, CAST(input)); }));
}
// ES6 section 7.1.3 ToNumber ( argument )
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 38b3d90649..49dcbe1e83 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -6,7 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -14,97 +14,94 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 24.2 DataView Objects
+// ES #sec-dataview-objects
-// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
+// ES #sec-dataview-constructor
BUILTIN(DataViewConstructor) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("DataView")));
-}
-
-// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
-BUILTIN(DataViewConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> buffer = args.atOrUndefined(isolate, 1);
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
- Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
-
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
- if (!buffer->IsJSArrayBuffer()) {
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
- }
- Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
-
- // 4. Let offset be ToIndex(byteOffset).
- Handle<Object> offset;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, offset,
- Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "DataView")));
+ } else { // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+ Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+ // 2. If Type(buffer) is not Object, throw a TypeError exception.
+ // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+ // TypeError exception.
+ if (!buffer->IsJSArrayBuffer()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+ }
+ Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point.
+ // 4. Let offset be ? ToIndex(byteOffset).
+ Handle<Object> offset;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, offset,
+ Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
- // 6. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
- // internal slot.
- double const buffer_byte_length = array_buffer->byte_length()->Number();
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // We currently violate the specification at this point. TODO: Fix that.
- // 7. If offset > bufferByteLength, throw a RangeError exception
- if (offset->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
- }
+ // 6. Let bufferByteLength be the value of buffer's
+ // [[ArrayBufferByteLength]] internal slot.
+ double const buffer_byte_length = array_buffer->byte_length()->Number();
- Handle<Object> view_byte_length;
- if (byte_length->IsUndefined(isolate)) {
- // 8. If byteLength is undefined, then
- // a. Let viewByteLength be bufferByteLength - offset.
- view_byte_length =
- isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
- } else {
- // 9. Else,
- // a. Let viewByteLength be ? ToIndex(byteLength).
- // b. If offset+viewByteLength > bufferByteLength, throw a RangeError
- // exception
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, view_byte_length,
- Object::ToIndex(isolate, byte_length,
- MessageTemplate::kInvalidDataViewLength));
- if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ // 7. If offset > bufferByteLength, throw a RangeError exception.
+ if (offset->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
}
- }
- // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
-  //    "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
-  //    [[ByteLength]], [[ByteOffset]]»).
- // 11. Set O's [[DataView]] internal slot to true.
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
- Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
- }
+ Handle<Object> view_byte_length;
+ if (byte_length->IsUndefined(isolate)) {
+ // 8. If byteLength is either not present or undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
+ view_byte_length =
+ isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ } else {
+ // 9. Else,
+ // a. Let viewByteLength be ? ToIndex(byteLength).
+ // b. If offset+viewByteLength > bufferByteLength, throw a
+ // RangeError exception.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, view_byte_length,
+ Object::ToIndex(isolate, byte_length,
+ MessageTemplate::kInvalidDataViewLength));
+ if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ }
+ }
- // 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+ // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+    //    "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+    //    [[ByteLength]], [[ByteOffset]]»).
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
+ Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
+ }
+
+ // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+ Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
- // 13. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+ // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
+ Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
- // 14. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+ // 13. Set O's [[ByteOffset]] internal slot to offset.
+ Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
- // 15. Return O.
- return *result;
+ // 14. Return O.
+ return *result;
+ }
}
// ES6 section 24.2.4.1 get DataView.prototype.buffer
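
Before moving on to the accessors, the offset/length validation in the rewritten [[Construct]] branch above is worth seeing in one piece. A sketch of just the arithmetic in plain C++, with the RangeError plumbing reduced to a boolean and the function name invented:

// Returns false for the RangeError cases; on success stores the view length.
// Values are doubles because the builtin keeps the ToIndex results as Numbers.
bool ComputeDataViewByteLength(double buffer_byte_length, double offset,
                               bool byte_length_is_undefined,
                               double byte_length, double* view_byte_length) {
  if (offset > buffer_byte_length) return false;                 // step 7
  if (byte_length_is_undefined) {
    *view_byte_length = buffer_byte_length - offset;             // step 8a
    return true;
  }
  if (offset + byte_length > buffer_byte_length) return false;   // step 9b
  *view_byte_length = byte_length;                               // step 9a
  return true;
}
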
@@ -176,7 +173,7 @@ MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
template <typename T>
MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Handle<Object> request_index,
- bool is_little_endian) {
+ bool is_little_endian, const char* method) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, request_index,
Object::ToIndex(isolate, request_index,
@@ -190,6 +187,13 @@ MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
isolate);
+ if (buffer->was_neutered()) {
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
+ Object);
+ }
size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
if (get_index + sizeof(T) > data_view_byte_length ||
@@ -290,7 +294,8 @@ uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
template <typename T>
MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Handle<Object> request_index,
- bool is_little_endian, Handle<Object> value) {
+ bool is_little_endian, Handle<Object> value,
+ const char* method) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, request_index,
Object::ToIndex(isolate, request_index,
@@ -306,6 +311,13 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
isolate);
+ if (buffer->was_neutered()) {
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
+ Object);
+ }
size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
if (get_index + sizeof(T) > data_view_byte_length ||
@@ -343,7 +355,8 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
isolate, result, \
GetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue())); \
+ is_little_endian->BooleanValue(), \
+ "DataView.prototype.get" #Type)); \
return *result; \
}
DATA_VIEW_PROTOTYPE_GET(Int8, int8_t)
@@ -369,7 +382,8 @@ DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
isolate, result, \
SetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue(), value)); \
+ is_little_endian->BooleanValue(), value, \
+                             "DataView.prototype.set" #Type));          \
return *result; \
}
DATA_VIEW_PROTOTYPE_SET(Int8, int8_t)
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 5f9f31e10b..c60275d94e 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -193,95 +193,94 @@ Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
} // namespace
-// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
+// ES #sec-date-constructor
BUILTIN(DateConstructor) {
HandleScope scope(isolate);
- double const time_val = JSDate::CurrentTimeValue(isolate);
- char buffer[128];
- ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
- RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-}
-
-// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
-BUILTIN(DateConstructor_ConstructStub) {
- HandleScope scope(isolate);
- int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- double time_val;
- if (argc == 0) {
- time_val = JSDate::CurrentTimeValue(isolate);
- } else if (argc == 1) {
- Handle<Object> value = args.at(1);
- if (value->IsJSDate()) {
- time_val = Handle<JSDate>::cast(value)->value()->Number();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToPrimitive(value));
- if (value->IsString()) {
- time_val = ParseDateTimeString(Handle<String>::cast(value));
+ if (args.new_target()->IsUndefined(isolate)) {
+ double const time_val = JSDate::CurrentTimeValue(isolate);
+ char buffer[128];
+ ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ } else {
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToNumber(value));
- time_val = value->Number();
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(Handle<String>::cast(value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(value));
+ time_val = value->Number();
+ }
}
- }
- } else {
- Handle<Object> year_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at(1)));
- Handle<Object> month_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at(2)));
- double year = year_object->Number();
- double month = month_object->Number();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
- if (argc >= 3) {
- Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at(3)));
- date = date_object->Number();
- if (argc >= 4) {
- Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
- Object::ToNumber(args.at(4)));
- hours = hours_object->Number();
- if (argc >= 5) {
- Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
- Object::ToNumber(args.at(5)));
- minutes = minutes_object->Number();
- if (argc >= 6) {
- Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
- Object::ToNumber(args.at(6)));
- seconds = seconds_object->Number();
- if (argc >= 7) {
- Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms_object,
- Object::ToNumber(args.at(7)));
- ms = ms_object->Number();
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at(7)));
+ ms = ms_object->Number();
+ }
}
}
}
}
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
}
- if (!std::isnan(year)) {
- double const y = DoubleToInteger(year);
- if (0.0 <= y && y <= 99) year = 1900 + y;
- }
- double const day = MakeDay(year, month, date);
- double const time = MakeTime(hours, minutes, seconds, ms);
- time_val = MakeDate(day, time);
- if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
- time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
- } else {
- time_val = std::numeric_limits<double>::quiet_NaN();
- }
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSDate::New(target, new_target, time_val));
}
- RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
}
// ES6 section 20.3.3.1 Date.now ( )
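
The [[Construct]] arm of DateConstructor above ends with the part that tends to surprise readers: the two-digit-year rewrite and the range clamp applied before the local-to-UTC conversion. A plain C++ sketch of that tail end, with MakeDay passed in as a callback since its calendar math is out of scope here; the constant is only a stand-in for DateCache::kMaxTimeBeforeUTCInMs:

#include <cmath>
#include <limits>

constexpr double kMsPerDay = 86400000.0;
constexpr double kMaxTimeBeforeUTCInMsSketch = 8.64e15 + 10 * kMsPerDay;

double MakeTimeSketch(double h, double m, double s, double ms) {
  return ((h * 60.0 + m) * 60.0 + s) * 1000.0 + ms;
}

double DateTimeValueSketch(double year, double month, double date, double h,
                           double m, double s, double ms,
                           double (*make_day)(double, double, double)) {
  if (!std::isnan(year)) {
    double y = std::trunc(year);                   // DoubleToInteger
    if (0.0 <= y && y <= 99.0) year = 1900.0 + y;  // 0..99 means 1900..1999
  }
  double tv =
      make_day(year, month, date) * kMsPerDay + MakeTimeSketch(h, m, s, ms);
  // Out-of-range local times become NaN; in-range ones are what the builtin
  // then feeds into isolate->date_cache()->ToUTC().
  if (tv < -kMaxTimeBeforeUTCInMsSketch || tv > kMaxTimeBeforeUTCInMsSketch)
    return std::numeric_limits<double>::quiet_NaN();
  return tv;
}
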
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index bf5b9086aa..0f60dfd97e 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -65,7 +65,6 @@ namespace internal {
TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
ASM(ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs) \
- ASM(JSConstructStubApi) \
ASM(JSConstructStubGenericRestrictedReturn) \
ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
@@ -110,11 +109,9 @@ namespace internal {
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterPushArgsThenCall) \
ASM(InterpreterPushUndefinedAndArgsThenCall) \
- ASM(InterpreterPushArgsThenCallFunction) \
- ASM(InterpreterPushUndefinedAndArgsThenCallFunction) \
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
ASM(InterpreterPushArgsThenConstruct) \
- ASM(InterpreterPushArgsThenConstructFunction) \
+ ASM(InterpreterPushArgsThenConstructArrayFunction) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread) \
ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \
@@ -123,7 +120,6 @@ namespace internal {
/* Code life-cycle */ \
ASM(CompileLazy) \
ASM(CompileLazyDeoptimizedCode) \
- ASM(CheckOptimizationMarker) \
ASM(DeserializeLazy) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
@@ -171,6 +167,7 @@ namespace internal {
TFC(NewArgumentsElements, NewArgumentsElements, 1) \
\
/* Debugger */ \
+ TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
ASM(FrameDropperTrampoline) \
ASM(HandleDebuggerStatement) \
\
@@ -214,6 +211,7 @@ namespace internal {
TFH(LoadIC_Uninitialized, LoadWithVector) \
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \
\
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
@@ -246,8 +244,19 @@ namespace internal {
/* ES6 #sec-array.of */ \
TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
+ TFS(ArrayIncludesSmiOrObject, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIncludesPackedDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIncludesHoleyDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
+ TFS(ArrayIndexOfSmiOrObject, kElements, kSearchElement, kLength, kFromIndex) \
+ TFS(ArrayIndexOfPackedDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIndexOfHoleyDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
@@ -259,7 +268,6 @@ namespace internal {
CPP(ArrayShift) \
TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
- CPP(ArraySlice) \
TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.splice */ \
CPP(ArraySplice) \
@@ -355,10 +363,18 @@ namespace internal {
TFJ(ArrayPrototypeValues, 0) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
TFJ(ArrayIteratorPrototypeNext, 0) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
+ TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
+ TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
+ kMapperFunction, kThisArg) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten */ \
+ TFJ(ArrayPrototypeFlatten, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \
+ TFJ(ArrayPrototypeFlatMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* ArrayBuffer */ \
+ /* ES #sec-arraybuffer-constructor */ \
CPP(ArrayBufferConstructor) \
- CPP(ArrayBufferConstructor_ConstructStub) \
CPP(ArrayBufferConstructor_DoNotInitialize) \
CPP(ArrayBufferPrototypeGetByteLength) \
CPP(ArrayBufferIsView) \
@@ -374,7 +390,6 @@ namespace internal {
\
/* BigInt */ \
CPP(BigIntConstructor) \
- CPP(BigIntConstructor_ConstructStub) \
CPP(BigIntAsUintN) \
CPP(BigIntAsIntN) \
CPP(BigIntPrototypeToLocaleString) \
@@ -382,8 +397,8 @@ namespace internal {
CPP(BigIntPrototypeValueOf) \
\
/* Boolean */ \
+ /* ES #sec-boolean-constructor */ \
CPP(BooleanConstructor) \
- CPP(BooleanConstructor_ConstructStub) \
/* ES6 #sec-boolean.prototype.tostring */ \
TFJ(BooleanPrototypeToString, 0) \
/* ES6 #sec-boolean.prototype.valueof */ \
@@ -435,8 +450,8 @@ namespace internal {
CPP(ConsoleContext) \
\
/* DataView */ \
+ /* ES #sec-dataview-constructor */ \
CPP(DataViewConstructor) \
- CPP(DataViewConstructor_ConstructStub) \
CPP(DataViewPrototypeGetBuffer) \
CPP(DataViewPrototypeGetByteLength) \
CPP(DataViewPrototypeGetByteOffset) \
@@ -462,8 +477,8 @@ namespace internal {
CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
+ /* ES #sec-date-constructor */ \
CPP(DateConstructor) \
- CPP(DateConstructor_ConstructStub) \
/* ES6 #sec-date.prototype.getdate */ \
TFJ(DatePrototypeGetDate, 0) \
/* ES6 #sec-date.prototype.getday */ \
@@ -599,6 +614,7 @@ namespace internal {
TFH(StoreICTrampoline, Store) \
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
+ TFH(StoreInArrayLiteralIC, StoreWithVector) \
TFH(LoadGlobalIC, LoadGlobalWithVector) \
TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
TFH(LoadGlobalICTrampoline, LoadGlobal) \
@@ -699,11 +715,8 @@ namespace internal {
\
/* Number */ \
TFC(AllocateHeapNumber, AllocateHeapNumber, 1) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
+ /* ES #sec-number-constructor */ \
TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
- TFJ(NumberConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
TFJ(NumberIsFinite, 1, kNumber) \
/* ES6 #sec-number.isinteger */ \
@@ -748,9 +761,8 @@ namespace internal {
TFS(Negate, kValue) \
\
/* Object */ \
+ /* ES #sec-object-constructor */ \
TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ObjectConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectAssign) \
/* ES #sec-object.create */ \
TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -814,7 +826,8 @@ namespace internal {
TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
- TFJ(PromiseConstructorLazyDeoptContinuation, 2, kPromise, kResult) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 4, kPromise, kReject, \
+ kException, kResult) \
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
CPP(IsPromise) \
@@ -853,9 +866,7 @@ namespace internal {
TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
\
/* Proxy */ \
- TFJ(ProxyConstructor, 0) \
- TFJ(ProxyConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ProxyConstructor, 2, kTarget, kHandler) \
TFJ(ProxyRevocable, 2, kTarget, kHandler) \
TFJ(ProxyRevoke, 0) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
@@ -909,6 +920,8 @@ namespace internal {
TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
/* ES #sec-regexp.prototype-@@match */ \
TFJ(RegExpPrototypeMatch, 1, kString) \
+ /* https://tc39.github.io/proposal-string-matchall/ */ \
+ TFJ(RegExpPrototypeMatchAll, 1, kString) \
/* ES #sec-get-regexp.prototype.multiline */ \
TFJ(RegExpPrototypeMultilineGetter, 0) \
/* ES #sec-regexp.prototype-@@search */ \
@@ -936,6 +949,10 @@ namespace internal {
TFS(RegExpSearchFast, kReceiver, kPattern) \
TFS(RegExpSplit, kRegExp, kString, kLimit) \
\
+ /* RegExp String Iterator */ \
+ /* https://tc39.github.io/proposal-string-matchall/ */ \
+ TFJ(RegExpStringIteratorPrototypeNext, 0) \
+ \
/* Set */ \
TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kKey) \
@@ -970,9 +987,9 @@ namespace internal {
CPP(AtomicsWake) \
\
/* String */ \
+ /* ES #sec-string-constructor */ \
TFJ(StringConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-string.fromcodepoint */ \
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1013,6 +1030,8 @@ namespace internal {
TFJ(StringPrototypeLink, 1, kValue) \
/* ES6 #sec-string.prototype.match */ \
TFJ(StringPrototypeMatch, 1, kRegexp) \
+ /* ES #sec-string.prototype.matchAll */ \
+ TFJ(StringPrototypeMatchAll, 1, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.padEnd */ \
@@ -1063,8 +1082,8 @@ namespace internal {
TFJ(StringIteratorPrototypeNext, 0) \
\
/* Symbol */ \
+ /* ES #sec-symbol-constructor */ \
CPP(SymbolConstructor) \
- CPP(SymbolConstructor_ConstructStub) \
/* ES6 #sec-symbol.for */ \
CPP(SymbolFor) \
/* ES6 #sec-symbol.keyfor */ \
@@ -1081,9 +1100,11 @@ namespace internal {
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
kByteOffset) \
+ /* ES #sec-typedarray-constructors */ \
+ TFS(CreateTypedArray, kTarget, kNewTarget, kArg1, kArg2, kArg3) \
+ TFJ(TypedArrayBaseConstructor, 0) \
+ TFJ(TypedArrayConstructorLazyDeoptContinuation, 1, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(TypedArrayConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -1266,6 +1287,7 @@ namespace internal {
V(AsyncGeneratorResolve) \
V(PromiseAll) \
V(PromiseConstructor) \
+ V(PromiseConstructorLazyDeoptContinuation) \
V(PromiseFulfillReactionJob) \
V(PromiseRace) \
V(ResolvePromise)
@@ -1298,8 +1320,6 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define BUILTINS_WITH_UNTAGGED_PARAMS(V) V(WasmCompileLazy)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 7c1db5093d..05b0fb9fcd 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
+#include "src/objects/descriptor-array.h"
namespace v8 {
namespace internal {
@@ -43,9 +44,14 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// AccessorInfo objects. In that case, their value can be recomputed even if
// the actual value on the object changes.
Comment("Check descriptor array length");
- Node* descriptors = LoadMapDescriptors(receiver_map);
- Node* descriptors_length = LoadFixedArrayBaseLength(descriptors);
- GotoIf(SmiLessThanOrEqual(descriptors_length, SmiConstant(1)), &slow);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
+ // Minimum descriptor array length required for fast path.
+ const int min_descriptors_length = DescriptorArray::LengthFor(Max(
+ JSFunction::kLengthDescriptorIndex, JSFunction::kNameDescriptorIndex));
+ TNode<Smi> descriptors_length = LoadFixedArrayBaseLength(descriptors);
+ GotoIf(SmiLessThanOrEqual(descriptors_length,
+ SmiConstant(min_descriptors_length)),
+ &slow);
// Check whether the length and name properties are still present as
// AccessorInfo objects. In that case, their value can be recomputed even if
@@ -53,27 +59,27 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Comment("Check name and length properties");
{
const int length_index = JSFunction::kLengthDescriptorIndex;
- Node* maybe_length = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(length_index));
+ TNode<Name> maybe_length = CAST(LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index)));
GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
&slow);
- Node* maybe_length_accessor = LoadFixedArrayElement(
+ TNode<Object> maybe_length_accessor = LoadFixedArrayElement(
descriptors, DescriptorArray::ToValueIndex(length_index));
GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
- Node* length_value_map = LoadMap(maybe_length_accessor);
+ Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
- Node* maybe_name = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(name_index));
+ TNode<Name> maybe_name = CAST(LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(name_index)));
GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
&slow);
- Node* maybe_name_accessor = LoadFixedArrayElement(
+ TNode<Object> maybe_name_accessor = LoadFixedArrayElement(
descriptors, DescriptorArray::ToValueIndex(name_index));
GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
- Node* name_value_map = LoadMap(maybe_name_accessor);
+ TNode<Map> name_value_map = LoadMap(CAST(maybe_name_accessor));
GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
}
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 48c28ab730..29422ab72c 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -55,6 +55,15 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
receiver, name);
}
+TF_BUILTIN(StoreInArrayLiteralIC_Slow, CodeStubAssembler) {
+ Node* array = Parameter(Descriptor::kReceiver);
+ Node* index = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, context, value, array,
+ index);
+}
+
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 94613a6a32..2439cd9d7b 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -35,6 +35,7 @@ IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
+IC_BUILTIN(StoreInArrayLiteralIC)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index edc529c798..e1f4aea405 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -7,7 +7,9 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/heap/heap-inl.h"
+#include "src/ic/accessor-assembler.h"
#include "src/macro-assembler.h"
+#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"
@@ -73,8 +75,9 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
- Node* length = SmiToIntPtr(Parameter(Descriptor::kLength));
- Node* mapped_count = SmiToIntPtr(Parameter(Descriptor::kMappedCount));
+ TNode<IntPtrT> length = SmiToIntPtr(Parameter(Descriptor::kLength));
+ TNode<IntPtrT> mapped_count =
+ SmiToIntPtr(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -102,9 +105,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
// The elements might be used to back mapped arguments. In that case fill
// the mapped elements (i.e. the first {mapped_count}) with the hole, but
// make sure not to overshoot the {length} if some arguments are missing.
- Node* number_of_holes =
- SelectConstant(IntPtrLessThan(mapped_count, length), mapped_count,
- length, MachineType::PointerRepresentation());
+ TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
Node* the_hole = TheHoleConstant();
// Fill the first elements up to {number_of_holes} with the hole.
@@ -173,6 +174,43 @@ TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
Return(Parameter(Descriptor::kReceiver));
}
+TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
+ Label tailcall_to_shared(this);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ TNode<Int32T> arg_count =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<JSFunction> function = CAST(LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer()));
+
+ // Check break-at-entry flag on the debug info.
+ TNode<SharedFunctionInfo> shared =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+ TNode<Object> maybe_debug_info =
+ LoadObjectField(shared, SharedFunctionInfo::kDebugInfoOffset);
+ GotoIf(TaggedIsSmi(maybe_debug_info), &tailcall_to_shared);
+
+ {
+ TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
+ TNode<Smi> flags =
+ CAST(LoadObjectField(debug_info, DebugInfo::kFlagsOffset));
+ GotoIfNot(SmiToInt32(SmiAnd(flags, SmiConstant(DebugInfo::kBreakAtEntry))),
+ &tailcall_to_shared);
+
+ CallRuntime(Runtime::kDebugBreakAtEntry, context, function);
+ Goto(&tailcall_to_shared);
+ }
+
+ BIND(&tailcall_to_shared);
+ // Tail call into code object on the SharedFunctionInfo.
+ TNode<Code> code = GetSharedFunctionInfoCode(shared);
+ // Use the ConstructTrampolineDescriptor because it passes new.target too in
+ // case this is called during construct.
+ CSA_ASSERT(this, IsCode(code));
+ ConstructTrampolineDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code, context, function, new_target, arg_count);
+}
+
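
DebugBreakTrampoline is a new builtin, and its whole point is captured by a small predicate: only functions whose DebugInfo carries the break-at-entry flag pay for the runtime call, everyone else just tail-calls their real code. A sketch in plain C++ with invented names; the real flag lives behind DebugInfo::kFlagsOffset and is not the literal bit used here:

struct DebugInfoSketch {
  unsigned flags;
};
constexpr unsigned kBreakAtEntrySketch = 1u << 0;  // illustrative bit only

// nullptr stands in for the TaggedIsSmi "no DebugInfo attached" fast path.
bool ShouldBreakAtEntry(const DebugInfoSketch* maybe_debug_info) {
  if (maybe_debug_info == nullptr) return false;
  return (maybe_debug_info->flags & kBreakAtEntrySketch) != 0;
}

When the predicate is true the builtin first calls Runtime::kDebugBreakAtEntry; either way it then tail-calls the SharedFunctionInfo's code with the original new.target and argument count, so construction through a patched function still behaves normally.
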
class RecordWriteCodeStubAssembler : public CodeStubAssembler {
public:
explicit RecordWriteCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -442,10 +480,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Return(TrueConstant());
}
-class DeletePropertyBaseAssembler : public CodeStubAssembler {
+class DeletePropertyBaseAssembler : public AccessorAssembler {
public:
explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : AccessorAssembler(state) {}
void DeleteDictionaryProperty(Node* receiver, Node* properties, Node* name,
Node* context, Label* dont_delete,
@@ -532,6 +570,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
BIND(&dictionary);
{
+ InvalidateValidityCellIfPrototype(receiver_map);
+
Node* properties = LoadSlowProperties(receiver);
DeleteDictionaryProperty(receiver, properties, unique, context,
&dont_delete, &if_notfound);
@@ -853,8 +893,8 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
Goto(&loop);
BIND(&loop);
{
- TNode<HeapObject> microtask = TNode<HeapObject>::UncheckedCast(
- LoadFixedArrayElement(queue, index.value()));
+ TNode<HeapObject> microtask =
+ CAST(LoadFixedArrayElement(queue, index.value()));
index = IntPtrAdd(index.value(), IntPtrConstant(1));
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
@@ -921,8 +961,10 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
// But from our current measurements it doesn't seem to be a
// serious performance problem, even if the microtask is full
// of CallHandlerTasks (which is not a realistic use case anyways).
- CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
- microtask_callback, microtask_data);
+ Node* const result =
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ GotoIfException(result, &if_exception, &var_exception);
Goto(&loop_next);
}
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index a8552338c8..f0d5160330 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -14,12 +14,6 @@ void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
-void Builtins::Generate_InterpreterPushArgsThenCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
@@ -27,13 +21,6 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
InterpreterPushArgsMode::kOther);
}
-void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined,
- InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
@@ -52,10 +39,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread(
masm, InterpreterPushArgsMode::kWithFinalSpread);
}
-void Builtins::Generate_InterpreterPushArgsThenConstructFunction(
+void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenConstructImpl(
- masm, InterpreterPushArgsMode::kJSFunction);
+ masm, InterpreterPushArgsMode::kArrayFunction);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index 64c43ee4ac..d05a8656db 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -14,15 +14,10 @@ namespace internal {
Handle<Code> Builtins::InterpreterPushArgsThenCall(
ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) {
switch (mode) {
- case InterpreterPushArgsMode::kJSFunction:
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return builtin_handle(
- kInterpreterPushUndefinedAndArgsThenCallFunction);
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return builtin_handle(kInterpreterPushArgsThenCallFunction);
- }
+ case InterpreterPushArgsMode::kArrayFunction:
+ // There is no special-case handling of calls to Array. They will all go
+ // through the kOther case below.
+ UNREACHABLE();
case InterpreterPushArgsMode::kWithFinalSpread:
return builtin_handle(kInterpreterPushArgsThenCallWithFinalSpread);
case InterpreterPushArgsMode::kOther:
@@ -40,8 +35,8 @@ Handle<Code> Builtins::InterpreterPushArgsThenCall(
Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
InterpreterPushArgsMode mode) {
switch (mode) {
- case InterpreterPushArgsMode::kJSFunction:
- return builtin_handle(kInterpreterPushArgsThenConstructFunction);
+ case InterpreterPushArgsMode::kArrayFunction:
+ return builtin_handle(kInterpreterPushArgsThenConstructArrayFunction);
case InterpreterPushArgsMode::kWithFinalSpread:
return builtin_handle(kInterpreterPushArgsThenConstructWithFinalSpread);
case InterpreterPushArgsMode::kOther:
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 21f6039f08..57702556a9 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-iterator-gen.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -31,23 +31,42 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Variable* exception) {
GotoIfException(method, if_exception, exception);
- Callable callable = CodeFactory::Call(isolate());
- Node* iterator = CallJS(callable, context, method, object);
- GotoIfException(iterator, if_exception, exception);
-
- Label get_next(this), if_notobject(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(iterator), &if_notobject);
- Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
+ Label if_not_callable(this, Label::kDeferred), if_callable(this);
+ GotoIf(TaggedIsSmi(method), &if_not_callable);
+ Branch(IsCallable(method), &if_callable, &if_not_callable);
- BIND(&if_notobject);
- { ThrowTypeError(context, MessageTemplate::kNotAnIterator, iterator); }
-
- BIND(&get_next);
- Node* const next = GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next, if_exception, exception);
+ BIND(&if_not_callable);
+ {
+ Node* ret = CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kNotIterable), object);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
- return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
- TNode<Object>::UncheckedCast(next)};
+ BIND(&if_callable);
+ {
+ Callable callable = CodeFactory::Call(isolate());
+ Node* iterator = CallJS(callable, context, method, object);
+ GotoIfException(iterator, if_exception, exception);
+
+ Label get_next(this), if_notobject(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(iterator), &if_notobject);
+ Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
+
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&get_next);
+ Node* const next = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next, if_exception, exception);
+
+ return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
+ TNode<Object>::UncheckedCast(next)};
+ }
}
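
For readers following the spec text rather than the CSA, the revised GetIterator now validates the @@iterator method before calling it and the returned value afterwards, throwing the corresponding TypeErrors, and looks up "next" exactly once. A rough, non-V8 C++ sketch of that ordering; the toy types and error strings are assumptions.

#include <functional>
#include <stdexcept>

// Toy model for the sketch only: an "iterator object" with a next() method.
struct IteratorObjectSketch {
  bool is_object = false;
  std::function<void()> next;  // the "next" property, looked up once up front
};

struct IteratorRecordSketch {
  IteratorObjectSketch iterator;
  std::function<void()> next;
};

// Mirrors the new ordering: not-callable check first, then the call, then the
// "result must be an object" check, then the single "next" lookup.
IteratorRecordSketch GetIteratorSketch(
    const std::function<IteratorObjectSketch()>& iterator_method) {
  if (!iterator_method)
    throw std::runtime_error("TypeError: object is not iterable");
  IteratorObjectSketch iterator = iterator_method();
  if (!iterator.is_object)
    throw std::runtime_error(
        "TypeError: result of Symbol.iterator is not an object");
  return IteratorRecordSketch{iterator, iterator.next};
}

int main() {
  auto method = [] {
    IteratorObjectSketch it;
    it.is_object = true;
    it.next = [] {};
    return it;
  };
  IteratorRecordSketch record = GetIteratorSketch(method);
  return record.iterator.is_object ? 0 : 1;
}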
Node* IteratorBuiltinsAssembler::IteratorStep(
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 3fb8d7792d..20d9453aa7 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/shared-function-info.h"
@@ -141,14 +141,15 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ TNode<BoolT> is_special =
+ IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask =
Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
USE(mask);
// Interceptors or access checks imply special receiver.
CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
+ SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
+ is_special, Int32TrueConstant()));
return is_special;
}
@@ -313,7 +314,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
TruncateIntPtrToInt32(var_descriptor_number.value()));
- Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+ Node* next_key = GetKey(descriptors, descriptor_index);
// Skip Symbols.
GotoIf(IsSymbol(next_key), &next_descriptor);
@@ -332,8 +333,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
VARIABLE(var_property_value, MachineRepresentation::kTagged,
UndefinedConstant());
- Node* descriptor_name_index = DescriptorArrayToKeyIndex(
- TruncateIntPtrToInt32(var_descriptor_number.value()));
+ TNode<IntPtrT> descriptor_name_index = ToKeyIndex<DescriptorArray>(
+ Unsigned(TruncateIntPtrToInt32(var_descriptor_number.value())));
// Let value be ? Get(O, key).
LoadPropertyFromFastObject(object, map, descriptors,
@@ -771,11 +772,15 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
// as the exception is observable.
Node* receiver_is_array =
CallRuntime(Runtime::kArrayIsArray, context, receiver);
- Node* builtin_tag = SelectTaggedConstant<Object>(
- IsTrue(receiver_is_array), LoadRoot(Heap::kArray_stringRootIndex),
- SelectTaggedConstant<Object>(IsCallableMap(receiver_map),
- LoadRoot(Heap::kFunction_stringRootIndex),
- LoadRoot(Heap::kObject_stringRootIndex)));
+ TNode<String> builtin_tag = Select<String>(
+ IsTrue(receiver_is_array),
+ [=] { return CAST(LoadRoot(Heap::kArray_stringRootIndex)); },
+ [=] {
+ return Select<String>(
+ IsCallableMap(receiver_map),
+ [=] { return CAST(LoadRoot(Heap::kFunction_stringRootIndex)); },
+ [=] { return CAST(LoadRoot(Heap::kObject_stringRootIndex)); });
+ });
// Lookup the @@toStringTag property on the {receiver}.
VARIABLE(var_tag, MachineRepresentation::kTagged,
@@ -1053,7 +1058,7 @@ TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kObject);
Node* context = Parameter(Descriptor::kContext);
- Return(GetSuperConstructor(object, context));
+ Return(GetSuperConstructor(context, object));
}
TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
@@ -1071,8 +1076,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* shared =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* bytecode_array =
- LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
+ Node* bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared);
+
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
@@ -1130,9 +1135,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
return_undefined(this, Label::kDeferred), if_notunique_name(this);
Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- &call_runtime);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
{
VARIABLE(var_index, MachineType::PointerRepresentation(),
IntPtrConstant(0));
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 032d0ef100..dd38dbc543 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -198,6 +198,50 @@ Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
return context;
}
+Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext(
+ Node* promise_capability, Node* native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ // TODO(bmeurer): Manually fold this into a single allocation.
+ Node* const array_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
+ Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+
+ Node* const context =
+ CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementRemainingSlot, SmiConstant(1));
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementCapabilitySlot, promise_capability);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementValuesArraySlot, values_array);
+
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
+ Node* context, Node* index, Node* native_context) {
+ CSA_ASSERT(this, TaggedIsSmi(index));
+ CSA_ASSERT(this, SmiGreaterThan(index, SmiConstant(0)));
+ CSA_ASSERT(this, SmiLessThanOrEqual(
+ index, SmiConstant(PropertyArray::HashField::kMax)));
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info = LoadContextElement(
+ native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, context);
+
+ STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
+ StoreObjectFieldNoWriteBarrier(resolve, JSFunction::kPropertiesOrHashOffset,
+ index);
+
+ return resolve;
+}
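
Taken together, the two helpers above implement the scheme the Promise.all builtins below rely on: every resolve-element closure shares one FunctionContext (remaining count, capability, values array), and each closure's element index is stashed in its identity hash field, which is why indices start at 1 (0 is PropertyArray::kNoHashSentinel). A hedged sketch of that bookkeeping, with plain C++ stand-ins for the V8 objects.

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-ins for the shared Promise.all bookkeeping; not V8 types.
struct ResolveElementContextSketch {
  int remaining = 1;        // starts at 1, decremented once iteration ends
  std::vector<int> values;  // the shared [[Values]] array
};

struct ResolveElementClosureSketch {
  uint32_t identity_hash = 0;                      // 0 == "no hash" sentinel
  ResolveElementContextSketch* context = nullptr;
};

// Mirrors CreatePromiseAllResolveElementFunction: the 1-based index is the
// only per-closure state, and it lives in the identity hash field.
ResolveElementClosureSketch MakeResolveElement(
    ResolveElementContextSketch* shared, uint32_t one_based_index) {
  assert(one_based_index >= 1);
  ResolveElementClosureSketch closure;
  closure.identity_hash = one_based_index;
  closure.context = shared;
  return closure;
}

int main() {
  ResolveElementContextSketch shared;
  ResolveElementClosureSketch first = MakeResolveElement(&shared, 1);
  ResolveElementClosureSketch second = MakeResolveElement(&shared, 2);
  // Both closures point at the same context; only the hash/index differs.
  return (first.context == second.context && first.identity_hash == 1) ? 0 : 1;
}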
+
Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
Node* promise, Node* debug_event, Node* native_context) {
Node* const context =
@@ -526,6 +570,61 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
return var_result.value();
}
+Node* PromiseBuiltinsAssembler::InvokeResolve(Node* native_context,
+ Node* constructor, Node* value,
+ Label* if_exception,
+ Variable* var_exception) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ BranchIfPromiseResolveLookupChainIntact(native_context, constructor, &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
+ {
+ Node* const result = CallBuiltin(Builtins::kPromiseResolve, native_context,
+ constructor, value);
+ GotoIfException(result, if_exception, var_exception);
+
+ var_result.Bind(result);
+ Goto(&done);
+ }
+
+ BIND(&if_slow);
+ {
+ Node* const resolve =
+ GetProperty(native_context, constructor, factory()->resolve_string());
+ GotoIfException(resolve, if_exception, var_exception);
+
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, resolve, constructor, value);
+ GotoIfException(result, if_exception, var_exception);
+
+ var_result.Bind(result);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
+void PromiseBuiltinsAssembler::BranchIfPromiseResolveLookupChainIntact(
+ Node* native_context, Node* constructor, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ GotoIfForceSlowPath(if_slow);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ GotoIfNot(WordEqual(promise_fun, constructor), if_slow);
+ Branch(IsPromiseResolveProtectorCellInvalid(), if_slow, if_fast);
+}
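
The new protector follows the usual fast-path pattern: the shortcut is valid only while the constructor is the unmodified %Promise% from the native context and the Promise.resolve protector cell has not been invalidated. A minimal sketch of that predicate; the cell representation and names are assumptions.

// Illustrative only; the cell representation is an assumption, not V8's.
enum class ProtectorState { kIntact, kInvalidated };

struct NativeContextSketch {
  const void* promise_function = nullptr;  // cached %Promise% constructor
  ProtectorState promise_resolve_protector = ProtectorState::kIntact;
};

// Mirrors BranchIfPromiseResolveLookupChainIntact: fast path only when the
// constructor is exactly %Promise% and the protector cell is still intact.
bool PromiseResolveLookupChainIntact(const NativeContextSketch& native_context,
                                     const void* constructor) {
  return constructor == native_context.promise_function &&
         native_context.promise_resolve_protector == ProtectorState::kIntact;
}

int main() {
  int promise_fn_marker = 0;
  NativeContextSketch ctx;
  ctx.promise_function = &promise_fn_marker;
  return PromiseResolveLookupChainIntact(ctx, &promise_fn_marker) ? 0 : 1;
}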
+
void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
CSA_ASSERT(this, IsNativeContext(native_context));
@@ -536,7 +635,7 @@ void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
GotoIfForceSlowPath(if_slow);
GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
if_slow);
- Branch(IsSpeciesProtectorCellInvalid(), if_slow, if_fast);
+ Branch(IsPromiseSpeciesProtectorCellInvalid(), if_slow, if_fast);
}
void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
@@ -668,6 +767,18 @@ TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
Node* promise = Parameter(Descriptor::kPromise);
+ Node* reject = Parameter(Descriptor::kReject);
+ Node* exception = Parameter(Descriptor::kException);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label finally(this);
+
+ GotoIf(IsTheHole(exception), &finally);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, reject, UndefinedConstant(), exception);
+ Goto(&finally);
+
+ BIND(&finally);
Return(promise);
}
@@ -1171,10 +1282,10 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
// intact, as that guards the lookup path for "constructor" on
// JSPromise instances which have the (initial) Promise.prototype.
Node* const promise_prototype =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
&if_slow_constructor);
- GotoIf(IsSpeciesProtectorCellInvalid(), &if_slow_constructor);
+ GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor);
// If the {constructor} is the Promise function, we just immediately
// return the {value} here and don't bother wrapping it into a
@@ -1606,21 +1717,23 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
// 7. If Type(resolution) is not Object, then
GotoIf(TaggedIsSmi(resolution), &if_fulfill);
- Node* const result_map = LoadMap(resolution);
- GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+ Node* const resolution_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(resolution_map), &if_fulfill);
// We can skip the "then" lookup on {resolution} if its [[Prototype]]
// is the (initial) Promise.prototype and the Promise#then protector
// is intact, as that guards the lookup path for the "then" property
// on JSPromise instances which have the (initial) %PromisePrototype%.
- Label if_fast(this), if_slow(this, Label::kDeferred);
+ Label if_fast(this), if_generic(this), if_slow(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
- BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
- &if_slow);
+ GotoIfForceSlowPath(&if_slow);
+ GotoIf(IsPromiseThenProtectorCellInvalid(), &if_slow);
+ GotoIfNot(IsJSPromiseMap(resolution_map), &if_generic);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ Branch(WordEqual(LoadMapPrototype(resolution_map), promise_prototype),
+ &if_fast, &if_slow);
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
BIND(&if_fast);
{
Node* const then =
@@ -1629,6 +1742,21 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Goto(&do_enqueue);
}
+ BIND(&if_generic);
+ {
+ // We can skip the lookup of "then" if the {resolution} is a (newly
+ // created) IterResultObject, as the Promise#then protector also
+ // ensures that the intrinsic %ObjectPrototype% doesn't contain any
+ // "then" property. This helps to avoid negative lookups on iterator
+ // results from async generators.
+ CSA_ASSERT(this, IsJSReceiverMap(resolution_map));
+ CSA_ASSERT(this, Word32BinaryNot(IsPromiseThenProtectorCellInvalid()));
+ Node* const iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Branch(WordEqual(resolution_map, iterator_result_map), &if_fulfill,
+ &if_slow);
+ }
+
BIND(&if_slow);
{
// 8. Let then be Get(resolution, "then").
@@ -1677,26 +1805,23 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
const IteratorRecord& iterator, Label* if_exception,
Variable* var_exception) {
IteratorBuiltinsAssembler iter_assembler(state());
- Label close_iterator(this);
Node* const instrumenting = IsDebugActive();
+ Node* const native_context = LoadNativeContext(context);
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
SetForwardingHandlerIfTrue(
- context, instrumenting,
+ native_context, instrumenting,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
- Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadContextElement(
- native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
- Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
- IntPtrConstant(0), SmiConstant(0));
- Node* const remaining_elements = AllocateSmiCell(1);
+ Node* const resolve_element_context =
+ CreatePromiseAllResolveElementContext(capability, native_context);
- VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(0));
-
- Label loop(this, &var_index), break_loop(this);
+ VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(1));
+ Label loop(this, &var_index), done_loop(this),
+ too_many_elements(this, Label::kDeferred),
+ close_iterator(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
@@ -1706,119 +1831,147 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const fast_iterator_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
Node* const next = iter_assembler.IteratorStep(
- context, iterator, &break_loop, fast_iterator_result_map, if_exception,
- var_exception);
+ native_context, iterator, &done_loop, fast_iterator_result_map,
+ if_exception, var_exception);
// Let nextValue be IteratorValue(next).
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
Node* const next_value = iter_assembler.IteratorValue(
- context, next, fast_iterator_result_map, if_exception, var_exception);
+ native_context, next, fast_iterator_result_map, if_exception,
+ var_exception);
    // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
- Node* const promise_resolve =
- GetProperty(context, constructor, factory()->resolve_string());
- GotoIfException(promise_resolve, &close_iterator, var_exception);
-
- Node* const next_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_resolve, constructor, next_value);
- GotoIfException(next_promise, &close_iterator, var_exception);
+ Node* const next_promise =
+ InvokeResolve(native_context, constructor, next_value, &close_iterator,
+ var_exception);
- // Let resolveElement be a new built-in function object as defined in
- // Promise.all Resolve Element Functions.
- Node* const resolve_context =
- CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
- remaining_elements);
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementCapabilitySlot, capability);
- StoreContextElementNoWriteBarrier(resolve_context,
- kPromiseAllResolveElementValuesArraySlot,
- values_array);
-
- Node* const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve_info = LoadContextElement(
- native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
- Node* const resolve =
- AllocateFunctionWithMapAndContext(map, resolve_info, resolve_context);
+ // Check if we reached the limit.
+ Node* const index = var_index.value();
+ GotoIf(SmiEqual(index, SmiConstant(PropertyArray::HashField::kMax)),
+ &too_many_elements);
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] + 1.
- {
- Label if_outofrange(this, Label::kDeferred), done(this);
- IncrementSmiCell(remaining_elements, &if_outofrange);
- Goto(&done);
-
- BIND(&if_outofrange);
- {
- // If the incremented value is out of Smi range, crash.
- Abort(AbortReason::kOffsetOutOfRange);
- }
-
- BIND(&done);
- }
+ Node* const remaining_elements_count = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot);
+ StoreContextElementNoWriteBarrier(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot,
+ SmiAdd(remaining_elements_count, SmiConstant(1)));
+
+ // Let resolveElement be CreateBuiltinFunction(steps,
+    //                          « [[AlreadyCalled]],
+    //                            [[Index]],
+    //                            [[Values]],
+    //                            [[Capability]],
+    //                            [[RemainingElements]] »).
+ // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false }.
+ // Set resolveElement.[[Index]] to index.
+ // Set resolveElement.[[Values]] to values.
+ // Set resolveElement.[[Capability]] to resultCapability.
+ // Set resolveElement.[[RemainingElements]] to remainingElementsCount.
+ Node* const resolve_element_fun = CreatePromiseAllResolveElementFunction(
+ resolve_element_context, index, native_context);
    // Perform ? Invoke(nextPromise, "then", « resolveElement,
    //                                        resultCapability.[[Reject]] »).
Node* const then =
- GetProperty(context, next_promise, factory()->then_string());
+ GetProperty(native_context, next_promise, factory()->then_string());
GotoIfException(then, &close_iterator, var_exception);
Node* const then_call = CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, then, next_promise, resolve,
+ native_context, then, next_promise, resolve_element_fun,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
GotoIfException(then_call, &close_iterator, var_exception);
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
- SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ SetPromiseHandledByIfTrue(native_context, instrumenting, then_call, [=]() {
// Load promiseCapability.[[Promise]]
return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
});
- // Set index to index + 1
- var_index.Bind(NumberInc(var_index.value()));
+ // Set index to index + 1.
+ var_index.Bind(SmiAdd(index, SmiConstant(1)));
Goto(&loop);
}
+ BIND(&too_many_elements);
+ {
+    // If there are too many elements (currently more than 2**21-1), raise a
+    // RangeError here (which is caught directly and turned into a rejection
+    // of the resulting promise). We could handle this case gracefully and
+    // support more than this number of elements by going to a separate
+    // function and passing the larger indices via a separate context, but it
+    // doesn't seem likely that we need this, and it's unclear how the rest
+    // of the system would deal with 2**21 live Promises anyway.
+ Node* const result =
+ CallRuntime(Runtime::kThrowRangeError, native_context,
+ SmiConstant(MessageTemplate::kTooManyElementsInPromiseAll));
+ GotoIfException(result, &close_iterator, var_exception);
+ Unreachable();
+ }
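
The 2**21-1 bound in the comment above is the largest value the identity hash field used for the element index can hold (PropertyArray::HashField::kMax). A short sketch of the guard, with the 21-bit field width stated as an assumption.

#include <cstdint>

// Assumed to equal PropertyArray::HashField::kMax (a 21-bit field).
constexpr uint32_t kMaxResolveElementIndex = (1u << 21) - 1;  // 2097151

// Mirrors the too_many_elements check: once the running index hits the
// maximum encodable value, bail out with a RangeError instead of creating
// another resolve-element closure.
bool WouldOverflowIndexField(uint32_t index) {
  return index >= kMaxResolveElementIndex;
}

int main() { return WouldOverflowIndexField(5) ? 1 : 0; }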
+
BIND(&close_iterator);
{
// Exception must be bound to a JS value.
CSA_ASSERT(this, IsNotTheHole(var_exception->value()));
- iter_assembler.IteratorCloseOnException(context, iterator, if_exception,
- var_exception);
+ iter_assembler.IteratorCloseOnException(native_context, iterator,
+ if_exception, var_exception);
}
- BIND(&break_loop);
+ BIND(&done_loop);
{
- Label resolve_promise(this), return_promise(this);
+ Label resolve_promise(this, Label::kDeferred), return_promise(this);
// Set iteratorRecord.[[Done]] to true.
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] - 1.
- Node* const remaining = DecrementSmiCell(remaining_elements);
- Branch(SmiEqual(remaining, SmiConstant(0)), &resolve_promise,
- &return_promise);
+ Node* remaining_elements_count = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot);
+ remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
+ StoreContextElementNoWriteBarrier(resolve_element_context,
+ kPromiseAllResolveElementRemainingSlot,
+ remaining_elements_count);
+ GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)),
+ &resolve_promise);
+
+ // Pre-allocate the backing store for the {values_array} to the desired
+ // capacity here. We may already have elements here in case of some
+ // fancy Thenable that calls the resolve callback immediately, so we need
+ // to handle that correctly here.
+ Node* const values_array = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const old_elements = LoadElements(values_array);
+ Node* const old_capacity = LoadFixedArrayBaseLength(old_elements);
+ Node* const new_capacity = var_index.value();
+ GotoIf(SmiGreaterThanOrEqual(old_capacity, new_capacity), &return_promise);
+ Node* const new_elements =
+ AllocateFixedArray(PACKED_ELEMENTS, new_capacity, SMI_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ CopyFixedArrayElements(PACKED_ELEMENTS, old_elements, PACKED_ELEMENTS,
+ new_elements, SmiConstant(0), old_capacity,
+ new_capacity, UPDATE_WRITE_BARRIER, SMI_PARAMETERS);
+ StoreObjectField(values_array, JSArray::kElementsOffset, new_elements);
+ Goto(&return_promise);
// If remainingElementsCount.[[Value]] is 0, then
// Let valuesArray be CreateArrayFromList(values).
// Perform ? Call(resultCapability.[[Resolve]], undefined,
    //                    « valuesArray »).
BIND(&resolve_promise);
-
- Node* const resolve =
- LoadObjectField(capability, PromiseCapability::kResolveOffset);
- Node* const resolve_call = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, resolve, UndefinedConstant(), values_array);
- GotoIfException(resolve_call, if_exception, var_exception);
- Goto(&return_promise);
+ {
+ Node* const resolve =
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
+ Node* const values_array = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const resolve_call = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ native_context, resolve, UndefinedConstant(), values_array);
+ GotoIfException(resolve_call, if_exception, var_exception);
+ Goto(&return_promise);
+ }
// Return resultCapability.[[Promise]].
BIND(&return_promise);
@@ -1829,31 +1982,6 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
return promise;
}
-Node* PromiseBuiltinsAssembler::IncrementSmiCell(Node* cell,
- Label* if_overflow) {
- CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
- Node* value = LoadCellValue(cell);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
-
- if (if_overflow != nullptr) {
- GotoIf(SmiEqual(value, SmiConstant(Smi::kMaxValue)), if_overflow);
- }
-
- Node* result = SmiAdd(value, SmiConstant(1));
- StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
- return result;
-}
-
-Node* PromiseBuiltinsAssembler::DecrementSmiCell(Node* cell) {
- CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
- Node* value = LoadCellValue(cell);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
-
- Node* result = SmiSub(value, SmiConstant(1));
- StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
- return result;
-}
-
// ES#sec-promise.all
// Promise.all ( iterable )
TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
@@ -1910,60 +2038,96 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ Node* const function = LoadFromFrame(StandardFrameConstants::kFunctionOffset);
+ Label already_called(this, Label::kDeferred), resolve_promise(this);
+
+  // We use the {function}'s context as the marker to remember whether this
+  // resolve element closure was already called. It points to the resolve
+  // element context (which is a FunctionContext) until it is called the
+  // first time, at which point we make it point to the native context here
+  // to mark this resolve element closure as done.
+ GotoIf(IsNativeContext(context), &already_called);
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
+ Node* const native_context = LoadNativeContext(context);
+ StoreObjectField(function, JSFunction::kContextOffset, native_context);
- Node* const index =
- LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
+ // Determine the index from the {function}.
+ Label unreachable(this, Label::kDeferred);
+ STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
+ Node* const identity_hash =
+ LoadJSReceiverIdentityHash(function, &unreachable);
+ CSA_ASSERT(this, IntPtrGreaterThan(identity_hash, IntPtrConstant(0)));
+ Node* const index = IntPtrSub(identity_hash, IntPtrConstant(1));
+
+ // Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
Node* const values_array =
LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const elements = LoadElements(values_array);
+ Node* const values_length =
+ LoadAndUntagObjectField(values_array, JSArray::kLengthOffset);
+ Label if_inbounds(this), if_outofbounds(this), done(this);
+ Branch(IntPtrLessThan(index, values_length), &if_inbounds, &if_outofbounds);
- Label already_called(this, Label::kDeferred), resolve_promise(this);
- GotoIf(SmiLessThan(index, SmiConstant(Smi::kZero)), &already_called);
- StoreContextElementNoWriteBarrier(context, kPromiseAllResolveElementIndexSlot,
- SmiConstant(-1));
+ BIND(&if_outofbounds);
+ {
+ // Check if we need to grow the backing store.
+ Node* const new_length = IntPtrAdd(index, IntPtrConstant(1));
+ Node* const elements_length =
+ LoadAndUntagObjectField(elements, FixedArray::kLengthOffset);
+ Label if_grow(this, Label::kDeferred), if_nogrow(this);
+ Branch(IntPtrLessThan(index, elements_length), &if_nogrow, &if_grow);
+
+ BIND(&if_grow);
+ {
+ // We need to grow the backing store to fit the {index} as well.
+ Node* const new_elements_length =
+ IntPtrMin(CalculateNewElementsCapacity(new_length),
+ IntPtrConstant(PropertyArray::HashField::kMax + 1));
+ CSA_ASSERT(this, IntPtrLessThan(index, new_elements_length));
+ CSA_ASSERT(this, IntPtrLessThan(elements_length, new_elements_length));
+ Node* const new_elements = AllocateFixedArray(
+ PACKED_ELEMENTS, new_elements_length, INTPTR_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ CopyFixedArrayElements(PACKED_ELEMENTS, elements, PACKED_ELEMENTS,
+ new_elements, elements_length,
+ new_elements_length);
+ StoreFixedArrayElement(new_elements, index, value);
+
+ // Update backing store and "length" on {values_array}.
+ StoreObjectField(values_array, JSArray::kElementsOffset, new_elements);
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ SmiTag(new_length));
+ Goto(&done);
+ }
+
+ BIND(&if_nogrow);
+ {
+ // The {index} is within bounds of the {elements} backing store, so
+ // just store the {value} and update the "length" of the {values_array}.
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ SmiTag(new_length));
+ StoreFixedArrayElement(elements, index, value);
+ Goto(&done);
+ }
+ }
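
The out-of-bounds path above is a grow-and-copy: compute a grown capacity that covers the new index, clamp it so it never exceeds the largest encodable index plus one, allocate a new backing store, copy the old elements, then store the value and bump the length. A hedged sketch of the same shape; the growth formula and the clamp constant are approximations of CalculateNewElementsCapacity and HashField::kMax, not their exact definitions.

#include <algorithm>
#include <cstddef>
#include <vector>

// Assumed clamp, analogous to PropertyArray::HashField::kMax + 1.
constexpr std::size_t kMaxIndexPlusOne = std::size_t{1} << 21;

// Store value at index, growing the backing store if necessary.
void StoreWithGrowth(std::vector<int>& elements, std::size_t index, int value) {
  const std::size_t new_length = index + 1;
  if (index >= elements.capacity()) {
    // Geometric growth in the spirit of CalculateNewElementsCapacity,
    // clamped to the maximum index the hash field can ever produce.
    std::size_t grown = new_length + new_length / 2 + 16;
    elements.reserve(std::min(grown, kMaxIndexPlusOne));
  }
  if (elements.size() < new_length) elements.resize(new_length);  // bump length
  elements[index] = value;  // finally store the value itself
}

int main() {
  std::vector<int> values;
  StoreWithGrowth(values, 7, 42);  // grows, then stores past the old length
  StoreWithGrowth(values, 2, 7);   // in-bounds store, no growth needed
  return values[7] == 42 ? 0 : 1;
}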
- // Set element in FixedArray
- Label runtime_set_element(this), did_set_element(this);
- GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
+ BIND(&if_inbounds);
{
- VARIABLE(var_elements, MachineRepresentation::kTagged,
- LoadElements(values_array));
- PossiblyGrowElementsCapacity(SMI_PARAMETERS, PACKED_ELEMENTS, values_array,
- index, &var_elements, SmiConstant(1),
- &runtime_set_element);
- StoreFixedArrayElement(var_elements.value(), index, value,
- UPDATE_WRITE_BARRIER, 0, SMI_PARAMETERS);
-
- // Update array length
- Label did_set_length(this);
- Node* const length = LoadJSArrayLength(values_array);
- GotoIfNot(TaggedIsPositiveSmi(length), &did_set_length);
- Node* const new_length = SmiAdd(index, SmiConstant(1));
- GotoIfNot(SmiLessThan(length, new_length), &did_set_length);
- StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
- new_length);
- // Assert that valuesArray.[[Length]] is less than or equal to the
- // elements backing-store length.e
- CSA_SLOW_ASSERT(
- this, SmiAboveOrEqual(LoadFixedArrayBaseLength(var_elements.value()),
- new_length));
- Goto(&did_set_length);
- BIND(&did_set_length);
+ // The {index} is in bounds of the {values_array},
+ // just store the {value} and continue.
+ StoreFixedArrayElement(elements, index, value);
+ Goto(&done);
}
- Goto(&did_set_element);
- BIND(&runtime_set_element);
- // New-space filled up or index too large, set element via runtime
- CallRuntime(Runtime::kCreateDataProperty, context, values_array, index,
- value);
- Goto(&did_set_element);
- BIND(&did_set_element);
-
- Node* const remaining_elements = LoadContextElement(
- context, kPromiseAllResolveElementRemainingElementsSlot);
- Node* const result = DecrementSmiCell(remaining_elements);
- GotoIf(SmiEqual(result, SmiConstant(0)), &resolve_promise);
+
+ BIND(&done);
+ Node* remaining_elements_count =
+ LoadContextElement(context, kPromiseAllResolveElementRemainingSlot);
+ remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
+ StoreContextElement(context, kPromiseAllResolveElementRemainingSlot,
+ remaining_elements_count);
+ GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)), &resolve_promise);
Return(UndefinedConstant());
BIND(&resolve_promise);
@@ -1977,6 +2141,9 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
BIND(&already_called);
Return(UndefinedConstant());
+
+ BIND(&unreachable);
+ Unreachable();
}
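
The once-only guard in this closure works by re-pointing its context: while the closure still references the resolve-element FunctionContext it has not run yet, and after the first run the native context is written into JSFunction::kContextOffset, so any later call bails out immediately. A tiny sketch of that state machine, with assumed names.

struct ContextSketch {
  bool is_native_context = false;
};

struct ClosureSketch {
  ContextSketch* context = nullptr;
};

// Returns false without doing any work if the closure has already run once.
bool RunResolveElementOnce(ClosureSketch& closure, ContextSketch& native_context) {
  if (closure.context->is_native_context) return false;  // already called
  closure.context = &native_context;  // mark the closure as done
  // ...then store the value and decrement the remaining-elements counter...
  return true;
}

int main() {
  ContextSketch function_context, native_context;
  native_context.is_native_context = true;
  ClosureSketch closure;
  closure.context = &function_context;
  bool first = RunResolveElementOnce(closure, native_context);   // true
  bool second = RunResolveElementOnce(closure, native_context);  // false
  return (first && !second) ? 0 : 1;
}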
// ES#sec-promise.race
@@ -2043,15 +2210,9 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
&reject_promise, &var_exception);
    // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
- Node* const promise_resolve =
- GetProperty(context, receiver, factory()->resolve_string());
- GotoIfException(promise_resolve, &close_iterator, &var_exception);
-
Node* const next_promise =
- CallJS(CodeFactory::Call(isolate(),
- ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_resolve, receiver, next_value);
- GotoIfException(next_promise, &close_iterator, &var_exception);
+ InvokeResolve(native_context, receiver, next_value, &close_iterator,
+ &var_exception);
    // Perform ? Invoke(nextPromise, "then", « resolveElement,
    //                                        resultCapability.[[Reject]] »).
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 2130101e84..f21d86a141 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -30,11 +30,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
enum PromiseAllResolveElementContextSlots {
- // Index into the values array, or -1 if the callback was already called
- kPromiseAllResolveElementIndexSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Remaining elements count (mutable HeapNumber)
- kPromiseAllResolveElementRemainingElementsSlot,
+ // Remaining elements count
+ kPromiseAllResolveElementRemainingSlot = Context::MIN_CONTEXT_SLOTS,
// Promise capability from Promise.all
kPromiseAllResolveElementCapabilitySlot,
@@ -105,6 +102,18 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* PromiseHasHandler(Node* promise);
+ // Creates the context used by all Promise.all resolve element closures,
+ // together with the values array. Since all closures for a single Promise.all
+ // call use the same context, we need to store the indices for the individual
+ // closures somewhere else (we put them into the identity hash field of the
+ // closures), and we also need to have a separate marker for when the closure
+ // was called already (we slap the native context onto the closure in that
+  // case to mark it as done).
+ Node* CreatePromiseAllResolveElementContext(Node* promise_capability,
+ Node* native_context);
+ Node* CreatePromiseAllResolveElementFunction(Node* context, Node* index,
+ Node* native_context);
+
Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
Node* native_context);
@@ -126,6 +135,14 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
PromiseReaction::Type type);
+ // We can skip the "resolve" lookup on {constructor} if it's the (initial)
+ // Promise constructor and the Promise.resolve() protector is intact, as
+ // that guards the lookup path for the "resolve" property on the %Promise%
+ // intrinsic object.
+ void BranchIfPromiseResolveLookupChainIntact(Node* native_context,
+ Node* constructor,
+ Label* if_fast, Label* if_slow);
+
// We can shortcut the SpeciesConstructor on {promise_map} if it's
// [[Prototype]] is the (initial) Promise.prototype and the @@species
// protector is intact, as that guards the lookup path for the "constructor"
@@ -142,6 +159,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* receiver_map, Label* if_fast,
Label* if_slow);
+ Node* InvokeResolve(Node* native_context, Node* constructor, Node* value,
+ Label* if_exception, Variable* var_exception);
template <typename... TArgs>
Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
@@ -160,9 +179,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const IteratorRecord& record, Label* if_exception,
Variable* var_exception);
- Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
- Node* DecrementSmiCell(Node* cell);
-
void SetForwardingHandlerIfTrue(Node* context, Node* condition,
const NodeGenerator& object);
inline void SetForwardingHandlerIfTrue(Node* context, Node* condition,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index fb35f48a15..a4c208ac4e 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -13,12 +13,6 @@
namespace v8 {
namespace internal {
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
-TF_BUILTIN(ProxyConstructor, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
-}
-
void ProxiesCodeStubAssembler::GotoIfRevokedProxy(Node* object,
Label* if_proxy_revoked) {
Label proxy_not_revoked(this);
@@ -127,40 +121,6 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
return array;
}
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
-TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
- int const kTargetArg = 0;
- int const kHandlerArg = 1;
-
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- Node* target = args.GetOptionalArgumentValue(kTargetArg);
- Node* handler = args.GetOptionalArgumentValue(kHandlerArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
-
- Label throw_proxy_non_object(this, Label::kDeferred),
- throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
- return_create_proxy(this);
-
- GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
- GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
- GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
-
- GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
- GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
-
- args.PopAndReturn(AllocateProxy(target, handler, context));
-
- BIND(&throw_proxy_non_object);
- ThrowTypeError(context, MessageTemplate::kProxyNonObject);
-
- BIND(&throw_proxy_handler_or_target_revoked);
- ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
-}
-
Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
Node* proxy, Node* native_context) {
Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
@@ -185,6 +145,65 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
proxy_context);
}
+// ES #sec-proxy-constructor
+TF_BUILTIN(ProxyConstructor, ProxiesCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ Node* new_target = Parameter(Descriptor::kNewTarget);
+ Label throwtypeerror(this, Label::kDeferred), createproxy(this);
+ Branch(IsUndefined(new_target), &throwtypeerror, &createproxy);
+
+ BIND(&throwtypeerror);
+ {
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
+ }
+
+ // 2. Return ? ProxyCreate(target, handler).
+ BIND(&createproxy);
+ {
+ // https://tc39.github.io/ecma262/#sec-proxycreate
+ Node* target = Parameter(Descriptor::kTarget);
+ Node* handler = Parameter(Descriptor::kHandler);
+
+ // 1. If Type(target) is not Object, throw a TypeError exception.
+ // 2. If target is a Proxy exotic object and target.[[ProxyHandler]] is
+ // null, throw a TypeError exception.
+ // 3. If Type(handler) is not Object, throw a TypeError exception.
+ // 4. If handler is a Proxy exotic object and handler.[[ProxyHandler]]
+ // is null, throw a TypeError exception.
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
+
+ // 5. Let P be a newly created object.
+ // 6. Set P's essential internal methods (except for [[Call]] and
+ // [[Construct]]) to the definitions specified in 9.5.
+ // 7. If IsCallable(target) is true, then
+ // a. Set P.[[Call]] as specified in 9.5.12.
+ // b. If IsConstructor(target) is true, then
+ // 1. Set P.[[Construct]] as specified in 9.5.13.
+ // 8. Set P.[[ProxyTarget]] to target.
+ // 9. Set P.[[ProxyHandler]] to handler.
+ // 10. Return P.
+ Return(AllocateProxy(target, handler, context));
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+ }
+}
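
For reference, the validation the new constructor performs before allocating the proxy is exactly ProxyCreate's: both target and handler must be objects, and neither may be an already-revoked proxy. A hedged standalone sketch of just that validation order; the object model and message texts are placeholders, not V8's.

#include <stdexcept>

// Toy object model for the sketch only.
struct ObjectSketch {
  bool is_object = false;
  bool is_proxy = false;
  bool proxy_revoked = false;  // [[ProxyHandler]] is null after revocation
};

// Mirrors the checks guarding AllocateProxy in the new ProxyConstructor.
void CheckProxyCreateArguments(const ObjectSketch& target,
                               const ObjectSketch& handler) {
  auto check = [](const ObjectSketch& o) {
    if (!o.is_object)
      throw std::runtime_error("TypeError: proxy argument is not an object");
    if (o.is_proxy && o.proxy_revoked)
      throw std::runtime_error("TypeError: proxy argument is a revoked proxy");
  };
  check(target);
  check(handler);
}

int main() {
  ObjectSketch target, handler;
  target.is_object = handler.is_object = true;
  CheckProxyCreateArguments(target, handler);  // passes; a proxy could be made
  return 0;
}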
+
TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) {
Node* const target = Parameter(Descriptor::kTarget);
Node* const handler = Parameter(Descriptor::kHandler);
@@ -439,8 +458,7 @@ TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
BIND(&trap_undefined);
{
// 7.a. Return ? target.[[HasProperty]](P).
- TailCallStub(Builtins::CallableFor(isolate(), Builtins::kHasProperty),
- context, name, target);
+ TailCallBuiltin(Builtins::kHasProperty, context, name, target);
}
BIND(&return_false);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 45329eed70..2cc354cb94 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -11,7 +11,8 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -20,13 +21,15 @@ namespace v8 {
namespace internal {
using compiler::Node;
+template <class T>
+using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
- CSA_ASSERT(this, IsFixedArray(context));
+ CSA_ASSERT(this, IsContext(context));
CSA_ASSERT(this, TaggedIsSmi(index));
CSA_ASSERT(this, TaggedIsSmi(length));
CSA_ASSERT(this, IsString(input));
@@ -88,6 +91,28 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
return result;
}
+TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(
+ TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> maybe_string, TNode<String> flags) {
+ TNode<JSFunction> regexp_function =
+ CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+ return RegExpCreate(context, initial_map, maybe_string, flags);
+}
+
+TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
+ TNode<Map> initial_map,
+ TNode<Object> maybe_string,
+ TNode<String> flags) {
+ TNode<String> pattern = Select<String>(
+ IsUndefined(maybe_string), [=] { return EmptyStringConstant(); },
+ [=] { return ToString_Inline(context, maybe_string); });
+ TNode<Object> regexp = CAST(AllocateJSObjectFromMap(initial_map));
+ return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
+ pattern, flags);
+}
+
Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
// Load the in-object field.
static const int field_offset =
@@ -142,8 +167,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Label named_captures(this), out(this);
- TNode<IntPtrT> num_indices = SmiUntag(LoadFixedArrayElement(
- match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
+ TNode<IntPtrT> num_indices = SmiUntag(CAST(LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kNumberOfCapturesIndex)));
Node* const num_results = SmiTag(WordShr(num_indices, 1));
Node* const start =
LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
@@ -1185,16 +1210,14 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
CSA_ASSERT(this, IsJSRegExp(regexp));
// Normalize pattern.
- Node* const pattern = Select<Object>(
+ TNode<Object> const pattern = Select<Object>(
IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_pattern); },
- MachineRepresentation::kTagged);
+ [=] { return ToString_Inline(context, maybe_pattern); });
// Normalize flags.
- Node* const flags = Select<Object>(
+ TNode<Object> const flags = Select<Object>(
IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_flags); },
- MachineRepresentation::kTagged);
+ [=] { return ToString_Inline(context, maybe_flags); });
// Initialize.
@@ -1843,30 +1866,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Branch(IsNull(result), &if_didnotmatch, &load_match);
BIND(&load_match);
- {
- Label fast_result(this), slow_result(this);
- BranchIfFastRegExpResult(context, result, &fast_result, &slow_result);
-
- BIND(&fast_result);
- {
- Node* const result_fixed_array = LoadElements(result);
- Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
-
- // The match is guaranteed to be a string on the fast path.
- CSA_ASSERT(this, IsString(match));
-
- var_match.Bind(match);
- Goto(&if_didmatch);
- }
-
- BIND(&slow_result);
- {
- // TODO(ishell): Use GetElement stub once it's available.
- Node* const match = GetProperty(context, result, smi_zero);
- var_match.Bind(ToString_Inline(context, match));
- Goto(&if_didmatch);
- }
- }
+ Node* const match = GetProperty(context, result, smi_zero);
+ var_match.Bind(ToString_Inline(context, match));
+ Goto(&if_didmatch);
}
BIND(&if_didnotmatch);
@@ -1951,6 +1953,163 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
RegExpPrototypeMatchBody(context, receiver, string, false);
}
+TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
+ TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> maybe_regexp, TNode<Object> maybe_string,
+ char const* method_name) {
+ Label create_iterator(this), if_regexp(this), if_not_regexp(this),
+ throw_type_error(this, Label::kDeferred);
+
+ // 1. Let S be ? ToString(O).
+ TNode<String> string = ToString_Inline(context, maybe_string);
+ TVARIABLE(Object, var_matcher);
+ TVARIABLE(Int32T, var_global);
+ TVARIABLE(Int32T, var_unicode);
+
+ // 2. If ? IsRegExp(R) is true, then
+ Branch(IsRegExp(context, maybe_regexp), &if_regexp, &if_not_regexp);
+ BIND(&if_regexp);
+ {
+ // a. Let C be ? SpeciesConstructor(R, %RegExp%).
+ TNode<Object> regexp_fun =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ TNode<Object> species_constructor =
+ SpeciesConstructor(native_context, maybe_regexp, regexp_fun);
+
+ // b. Let flags be ? ToString(? Get(R, "flags")).
+ // TODO(pwong): Add fast path to avoid property lookup.
+ TNode<Object> flags = GetProperty(context, maybe_regexp,
+ isolate()->factory()->flags_string());
+ TNode<Object> flags_string = ToString_Inline(context, flags);
+
+    // c. Let matcher be ? Construct(C, « R, flags »).
+ var_matcher =
+ CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ species_constructor, maybe_regexp, flags_string));
+
+ // d. Let global be ? ToBoolean(? Get(matcher, "global")).
+ // TODO(pwong): Add fast path for loading flags.
+ var_global = UncheckedCast<Int32T>(
+ SlowFlagGetter(context, var_matcher.value(), JSRegExp::kGlobal));
+
+ // e. Let fullUnicode be ? ToBoolean(? Get(matcher, "unicode").
+ // TODO(pwong): Add fast path for loading flags.
+ var_unicode = UncheckedCast<Int32T>(
+ SlowFlagGetter(context, var_matcher.value(), JSRegExp::kUnicode));
+
+ // f. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ // TODO(pwong): Add fast path for loading last index.
+ TNode<Number> last_index = UncheckedCast<Number>(
+ ToLength_Inline(context, SlowLoadLastIndex(context, maybe_regexp)));
+
+ // g. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ // TODO(pwong): Add fast path for storing last index.
+ SlowStoreLastIndex(context, var_matcher.value(), last_index);
+
+ Goto(&create_iterator);
+ }
+ // 3. Else,
+ BIND(&if_not_regexp);
+ {
+ // a. Let flags be "g".
+ // b. Let matcher be ? RegExpCreate(R, flags).
+ var_matcher = RegExpCreate(context, native_context, maybe_regexp,
+ StringConstant("g"));
+
+ // c. If ? IsRegExp(matcher) is not true, throw a TypeError exception.
+ GotoIfNot(IsRegExp(context, var_matcher.value()), &throw_type_error);
+
+ // d. Let global be true.
+ var_global = Int32Constant(1);
+
+ // e. Let fullUnicode be false.
+ var_unicode = Int32Constant(0);
+
+ // f. If ? Get(matcher, "lastIndex") is not 0, throw a TypeError exception.
+ TNode<Object> last_index =
+ CAST(LoadLastIndex(context, var_matcher.value(), false));
+ Branch(SmiEqual(SmiConstant(0), last_index), &create_iterator,
+ &throw_type_error);
+ }
+ BIND(&throw_type_error);
+ {
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), maybe_regexp);
+ }
+ // 4. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
+ // CreateRegExpStringIterator ( R, S, global, fullUnicode )
+ BIND(&create_iterator);
+ {
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context,
+ Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
+
+    // 4. Let iterator be ObjectCreate(%RegExpStringIteratorPrototype%, «
+    //    [[IteratingRegExp]], [[IteratedString]], [[Global]], [[Unicode]],
+    //    [[Done]] »).
+ TNode<Object> iterator = CAST(Allocate(JSRegExpStringIterator::kSize));
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator,
+ JSRegExpStringIterator::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // 5. Set iterator.[[IteratingRegExp]] to R.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratingRegExpOffset,
+ var_matcher.value());
+
+ // 6. Set iterator.[[IteratedString]] to S.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratedStringOffset, string);
+
+#ifdef DEBUG
+ // Verify global and unicode can be bitwise shifted without masking.
+ TNode<Int32T> zero = Int32Constant(0);
+ TNode<Int32T> one = Int32Constant(1);
+ CSA_ASSERT(this, Word32Or(Word32Equal(var_global.value(), zero),
+ Word32Equal(var_global.value(), one)));
+ CSA_ASSERT(this, Word32Or(Word32Equal(var_unicode.value(), zero),
+ Word32Equal(var_unicode.value(), one)));
+#endif // DEBUG
+
+ // 7. Set iterator.[[Global]] to global.
+ // 8. Set iterator.[[Unicode]] to fullUnicode.
+ // 9. Set iterator.[[Done]] to false.
+ TNode<Word32T> global_flag = Word32Shl(
+ var_global.value(), Int32Constant(JSRegExpStringIterator::kGlobalBit));
+ TNode<Word32T> unicode_flag =
+ Word32Shl(var_unicode.value(),
+ Int32Constant(JSRegExpStringIterator::kUnicodeBit));
+ TNode<Word32T> iterator_flags = Word32Or(global_flag, unicode_flag);
+ StoreObjectFieldNoWriteBarrier(iterator,
+ JSRegExpStringIterator::kFlagsOffset,
+ SmiFromInt32(Signed(iterator_flags)));
+
+ return iterator;
+ }
+}
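
The iterator keeps its three booleans packed into a single Smi-sized flags word; the shifts at the end of MatchAllIterator and the bit tests in the iterator assembler below are the usual pack/test/set trio. As a sketch, with the bit positions stated as assumptions standing in for kDoneBit/kGlobalBit/kUnicodeBit.

#include <cstdint>

// Assumed bit positions, analogous to JSRegExpStringIterator::k*Bit.
constexpr int kDoneBit = 0;
constexpr int kGlobalBit = 1;
constexpr int kUnicodeBit = 2;

// Pack: [[Done]] starts out false, so only global/unicode can be set here.
constexpr uint32_t PackIteratorFlags(bool global, bool unicode) {
  return (static_cast<uint32_t>(global) << kGlobalBit) |
         (static_cast<uint32_t>(unicode) << kUnicodeBit);
}

// Test and set: what HasDoneFlag/HasGlobalFlag/SetDoneFlag reduce to.
constexpr bool HasFlag(uint32_t flags, int bit) { return (flags >> bit) & 1u; }
constexpr uint32_t SetDone(uint32_t flags) { return flags | (1u << kDoneBit); }

int main() {
  uint32_t flags = PackIteratorFlags(/*global=*/true, /*unicode=*/false);
  flags = SetDone(flags);
  return (HasFlag(flags, kGlobalBit) && HasFlag(flags, kDoneBit) &&
          !HasFlag(flags, kUnicodeBit))
             ? 0
             : 1;
}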
+
+// https://tc39.github.io/proposal-string-matchall/
+// RegExp.prototype [ @@matchAll ] ( string )
+TF_BUILTIN(RegExpPrototypeMatchAll, RegExpBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> string = CAST(Parameter(Descriptor::kString));
+
+ // 1. Let R be the this value.
+ // 2. If Type(R) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@matchAll");
+
+ // 3. Return ? MatchAllIterator(R, string).
+ Return(MatchAllIterator(context, native_context, receiver, string,
+ "RegExp.prototype.@@matchAll"));
+}
+
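
The JavaScript-visible behaviour implemented by MatchAllIterator and the @@matchAll builtin above is the matchAll iteration protocol. A minimal TypeScript sketch with illustrative inputs (not V8-internal code; assumes an ES2020-era lib/runtime):

    // matchAll hands back an iterator of exec-style results; spreading it
    // drains every match of a global pattern.
    const results = [..."2018-05-31".matchAll(/\d+/g)];
    console.log(results.map(m => m[0]));  // ["2018", "05", "31"]
    console.log(results[0].index);        // 0
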
 // Helper that skips a few initial checks and assumes...
// 1) receiver is a "fast" RegExp
// 2) pattern is a string
@@ -2888,5 +3047,174 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
}
}
+class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
+ public:
+ explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state)
+ : RegExpBuiltinsAssembler(state) {}
+
+ protected:
+ TNode<Smi> LoadFlags(TNode<HeapObject> iterator) {
+ return LoadObjectField<Smi>(iterator, JSRegExpStringIterator::kFlagsOffset);
+ }
+
+ TNode<BoolT> HasDoneFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kDoneBit));
+ }
+
+ TNode<BoolT> HasGlobalFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kGlobalBit));
+ }
+
+ TNode<BoolT> HasUnicodeFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kUnicodeBit));
+ }
+
+ void SetDoneFlag(TNode<HeapObject> iterator, TNode<Smi> flags) {
+ TNode<Smi> new_flags =
+ SmiOr(flags, SmiConstant(1 << JSRegExpStringIterator::kDoneBit));
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kFlagsOffset, new_flags);
+ }
+};
+
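
The iterator keeps its [[Done]], [[Global]] and [[Unicode]] slots packed into the single Smi-tagged flags field that the helpers above test and set. Roughly the same bookkeeping in TypeScript; the bit positions here are stand-ins, the real ones come from JSRegExpStringIterator (kDoneBit, kGlobalBit, kUnicodeBit):

    // Hypothetical bit positions used only for this sketch.
    const kDoneBit = 0, kGlobalBit = 1, kUnicodeBit = 2;

    const hasFlag = (flags: number, bit: number) => (flags & (1 << bit)) !== 0;
    const setDone = (flags: number) => flags | (1 << kDoneBit);

    let flags = (1 << kGlobalBit) | (1 << kUnicodeBit);  // global + unicode, not done
    console.log(hasFlag(flags, kDoneBit));  // false
    flags = setDone(flags);
    console.log(hasFlag(flags, kDoneBit));  // true
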
+// https://tc39.github.io/proposal-string-matchall/
+// %RegExpStringIteratorPrototype%.next ( )
+TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+
+ Label if_match(this), if_no_match(this, Label::kDeferred),
+ return_empty_done_result(this, Label::kDeferred),
+ throw_bad_receiver(this, Label::kDeferred);
+
+ // 1. Let O be the this value.
+ // 2. If Type(O) is not Object, throw a TypeError exception.
+ GotoIf(TaggedIsSmi(maybe_receiver), &throw_bad_receiver);
+ GotoIfNot(IsJSReceiver(maybe_receiver), &throw_bad_receiver);
+ TNode<HeapObject> receiver = CAST(maybe_receiver);
+
+ // 3. If O does not have all of the internal slots of a RegExp String Iterator
+ // Object Instance (see 5.3), throw a TypeError exception.
+ GotoIfNot(InstanceTypeEqual(LoadInstanceType(receiver),
+ JS_REGEXP_STRING_ITERATOR_TYPE),
+ &throw_bad_receiver);
+
+ // 4. If O.[[Done]] is true, then
+ // a. Return ! CreateIterResultObject(undefined, true).
+ TNode<Smi> flags = LoadFlags(receiver);
+ GotoIf(HasDoneFlag(flags), &return_empty_done_result);
+
+ // 5. Let R be O.[[IteratingRegExp]].
+ TNode<Object> iterating_regexp =
+ LoadObjectField(receiver, JSRegExpStringIterator::kIteratingRegExpOffset);
+
+ // 6. Let S be O.[[IteratedString]].
+ TNode<String> iterating_string = CAST(
+ LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset));
+
+ // 7. Let global be O.[[Global]].
+ // See if_match.
+
+ // 8. Let fullUnicode be O.[[Unicode]].
+ // See if_global.
+
+ // 9. Let match be ? RegExpExec(R, S).
+ TVARIABLE(Object, var_match);
+ {
+ Label if_fast(this), if_slow(this), next(this);
+ BranchIfFastRegExp(context, iterating_regexp, &if_fast, &if_slow);
+ BIND(&if_fast);
+ {
+ var_match = CAST(RegExpPrototypeExecBody(context, iterating_regexp,
+ iterating_string, true));
+ Goto(&next);
+ }
+ BIND(&if_slow);
+ {
+ var_match = CAST(RegExpExec(context, iterating_regexp, iterating_string));
+ Goto(&next);
+ }
+ BIND(&next);
+ }
+
+ // 10. If match is null, then
+ Branch(IsNull(var_match.value()), &if_no_match, &if_match);
+ BIND(&if_no_match);
+ {
+ // a. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // b. Return ! CreateIterResultObject(undefined, true).
+ Goto(&return_empty_done_result);
+ }
+ // 11. Else,
+ BIND(&if_match);
+ {
+ Label if_global(this), if_not_global(this, Label::kDeferred);
+
+ // a. If global is true,
+ Branch(HasGlobalFlag(flags), &if_global, &if_not_global);
+ BIND(&if_global);
+ {
+ // i. Let matchStr be ? ToString(? Get(match, "0")).
+ // TODO(pwong): Add fast path for fast regexp results. See
+ // BranchIfFastRegExpResult().
+ TNode<Object> match_str = ToString_Inline(
+ context, GetProperty(context, var_match.value(),
+ isolate()->factory()->zero_string()));
+
+ // ii. If matchStr is the empty string,
+ {
+ Label next(this);
+ GotoIfNot(IsEmptyString(match_str), &next);
+
+ // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
+ // TODO(pwong): Add fast path for loading last index.
+ TNode<Object> last_index =
+ CAST(SlowLoadLastIndex(context, iterating_regexp));
+ TNode<Number> this_index = ToLength_Inline(context, last_index);
+
+ // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
+ TNode<Object> next_index = CAST(AdvanceStringIndex(
+ iterating_string, this_index, HasUnicodeFlag(flags), false));
+
+ // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
+ // TODO(pwong): Add fast path for storing last index.
+ SlowStoreLastIndex(context, iterating_regexp, next_index);
+
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // iii. Return ! CreateIterResultObject(match, false).
+ Return(AllocateJSIteratorResult(context, var_match.value(),
+ FalseConstant()));
+ }
+ // b. Else,
+ BIND(&if_not_global);
+ {
+ // i. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // ii. Return ! CreateIterResultObject(match, false).
+ Return(AllocateJSIteratorResult(context, var_match.value(),
+ FalseConstant()));
+ }
+ }
+ BIND(&return_empty_done_result);
+ Return(
+ AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant()));
+
+ BIND(&throw_bad_receiver);
+ {
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("%RegExpStringIterator%.prototype.next"),
+ receiver);
+ }
+}
+
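
Condensed from the spec steps annotated in the builtin above, %RegExpStringIteratorPrototype%.next behaves roughly like the following TypeScript sketch; regexp.exec stands in for RegExpExec, and the state object stands in for the iterator's internal slots:

    interface MatchAllIteratorState {
      regexp: RegExp;
      str: string;
      global: boolean;
      unicode: boolean;
      done: boolean;
    }

    function next(it: MatchAllIteratorState): { value: RegExpExecArray | null; done: boolean } {
      if (it.done) return { value: null, done: true };   // 4. already exhausted
      const match = it.regexp.exec(it.str);              // 9. RegExpExec(R, S)
      if (match === null) {                              // 10. no match: finish
        it.done = true;
        return { value: null, done: true };
      }
      if (!it.global) {                                  // 11.b non-global: single result
        it.done = true;
        return { value: match, done: false };
      }
      if (match[0] === "") {                             // 11.a.ii empty match: advance
        // Simplified AdvanceStringIndex: the real step only skips two code
        // units when a surrogate pair starts at this position.
        it.regexp.lastIndex += it.unicode ? 2 : 1;
      }
      return { value: match, done: false };              // 11.a.iii
    }
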
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index b57b90acf9..2146da5c0e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -19,6 +19,19 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const map, Label* const if_isunmodified,
Label* const if_ismodified);
+ // Create and initialize a RegExp object.
+ TNode<Object> RegExpCreate(TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> regexp_string, TNode<String> flags);
+
+ TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map,
+ TNode<Object> regexp_string, TNode<String> flags);
+
+ TNode<Object> MatchAllIterator(TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> regexp, TNode<Object> string,
+ char const* method_name);
+
protected:
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 8edb3574cd..a847a5d892 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -10,9 +10,9 @@
#include "src/code-factory.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
-#include "src/factory.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -34,7 +34,7 @@ BUILTIN(AtomicsIsLockFree) {
}
// ES #sec-validatesharedintegertypedarray
-MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
Isolate* isolate, Handle<Object> object, bool only_int32 = false) {
if (object->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
@@ -60,7 +60,7 @@ MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
// ES #sec-validateatomicaccess
// ValidateAtomicAccess( typedArray, requestIndex )
-MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
+V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
Isolate* isolate, Handle<JSTypedArray> typed_array,
Handle<Object> request_index) {
Handle<Object> access_index_obj;
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5cc4621b84..bc9578514a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -8,7 +8,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects.h"
namespace v8 {
@@ -525,7 +525,7 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
TNode<Int32T> code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}
- TNode<String> result = StringFromCharCode(code);
+ TNode<String> result = StringFromSingleCharCode(code);
Return(result);
}
@@ -586,7 +586,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* code32 = TruncateTaggedToWord32(context, code);
TNode<Int32T> code16 =
Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
- Node* result = StringFromCharCode(code16);
+ Node* result = StringFromSingleCharCode(code16);
arguments.PopAndReturn(result);
}
@@ -682,7 +682,7 @@ TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
[this](TNode<String> string, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
TNode<Int32T> code = StringCharCodeAt(string, index);
- return StringFromCharCode(code);
+ return StringFromSingleCharCode(code);
});
}
@@ -1024,7 +1024,7 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Node* const maybe_string,
Handle<Symbol> symbol, const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call, CodeStubArguments* args) {
+ const NodeFunction1& generic_call) {
Label out(this);
// Smis definitely don't have an attached symbol.
@@ -1069,12 +1069,7 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
BIND(&stub_call);
// TODO(jgruber): Add a no-JS scope once it exists.
- Node* const result = regexp_call();
- if (args == nullptr) {
- Return(result);
- } else {
- args->PopAndReturn(result);
- }
+ regexp_call();
BIND(&slow_lookup);
}
@@ -1094,12 +1089,7 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
GotoIf(IsNull(maybe_func), &out);
// Attempt to call the function.
- Node* const result = generic_call(maybe_func);
- if (args == nullptr) {
- Return(result);
- } else {
- args->PopAndReturn(result);
- }
+ generic_call(maybe_func);
BIND(&out);
}
@@ -1294,12 +1284,12 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, search, receiver, isolate()->factory()->replace_symbol(),
[=]() {
- return CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
- replace);
+ Return(CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
+ replace));
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, search, receiver, replace);
+ Return(CallJS(call_callable, context, fn, search, receiver, replace));
});
// Convert {receiver} and {search} to strings.
@@ -1439,8 +1429,9 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
protected:
enum Variant { kMatch, kSearch };
- void Generate(Variant variant, const char* method_name, Node* const receiver,
- Node* maybe_regexp, Node* const context) {
+ void Generate(Variant variant, const char* method_name,
+ TNode<Object> receiver, TNode<Object> maybe_regexp,
+ TNode<Context> context) {
Label call_regexp_match_search(this);
Builtins::Name builtin;
@@ -1457,33 +1448,24 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
MaybeCallFunctionAtSymbol(
context, maybe_regexp, receiver, symbol,
- [=] { return CallBuiltin(builtin, context, maybe_regexp, receiver); },
+ [=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, maybe_regexp, receiver);
+ Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
});
// maybe_regexp is not a RegExp nor has [@@match / @@search] property.
{
RegExpBuiltinsAssembler regexp_asm(state());
- Node* const receiver_string = ToString_Inline(context, receiver);
- Node* const pattern = Select(
- IsUndefined(maybe_regexp), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_regexp); },
- MachineRepresentation::kTagged);
-
- // Create RegExp
- // TODO(pwong): This could be factored out as a helper (RegExpCreate) that
- // also does the "is fast" checks.
- Node* const native_context = LoadNativeContext(context);
- Node* const regexp_function =
- LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map = LoadObjectField(
- regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const regexp = CallRuntime(
- Runtime::kRegExpInitializeAndCompile, context,
- AllocateJSObjectFromMap(initial_map), pattern, EmptyStringConstant());
+ TNode<String> receiver_string = ToString_Inline(context, receiver);
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<HeapObject> regexp_function = CAST(
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+ TNode<Object> regexp = regexp_asm.RegExpCreate(
+ context, initial_map, maybe_regexp, EmptyStringConstant());
Label fast_path(this), slow_path(this);
regexp_asm.BranchIfFastRegExp(context, regexp, initial_map, &fast_path,
@@ -1494,7 +1476,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
BIND(&slow_path);
{
- Node* const maybe_func = GetProperty(context, regexp, symbol);
+ TNode<Object> maybe_func = GetProperty(context, regexp, symbol);
Callable call_callable = CodeFactory::Call(isolate());
Return(CallJS(call_callable, context, maybe_func, regexp,
receiver_string));
@@ -1505,13 +1487,52 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
// ES6 #sec-string.prototype.match
TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
}
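
One consequence of routing the non-RegExp case through RegExpCreate in Generate() above: the argument is compiled into a fresh regexp, with undefined becoming the empty pattern. For reference, in TypeScript with illustrative values:

    // undefined is compiled as the empty pattern, strings as a fresh RegExp.
    console.log("abc".match(undefined as any)![0]);  // ""
    console.log("abcbc".match("bc")!.index);         // 1
    console.log("abcbc".search("bc"));               // 1
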
+// ES #sec-string.prototype.matchAll
+TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
+ char const* method_name = "String.prototype.matchAll";
+
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> native_context = LoadNativeContext(context);
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ RequireObjectCoercible(context, receiver, method_name);
+
+ // 2. If regexp is neither undefined nor null, then
+ Label return_match_all_iterator(this);
+ GotoIf(IsNullOrUndefined(maybe_regexp), &return_match_all_iterator);
+ {
+ // a. Let matcher be ? GetMethod(regexp, @@matchAll).
+ // b. If matcher is not undefined, then
+    //   i. Return ? Call(matcher, regexp, « O »).
+ auto if_regexp_call = [&] { Goto(&return_match_all_iterator); };
+ auto if_generic_call = [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ };
+ MaybeCallFunctionAtSymbol(context, maybe_regexp, receiver,
+ isolate()->factory()->match_all_symbol(),
+ if_regexp_call, if_generic_call);
+ Goto(&return_match_all_iterator);
+ }
+ BIND(&return_match_all_iterator);
+ {
+ // 3. Return ? MatchAllIterator(regexp, O).
+ RegExpBuiltinsAssembler regexp_asm(state());
+ TNode<Object> iterator = regexp_asm.MatchAllIterator(
+ context, native_context, maybe_regexp, receiver, method_name);
+ Return(iterator);
+ }
+}
+
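
The builtin above only falls back to MatchAllIterator when the argument does not supply its own @@matchAll; any object with a callable Symbol.matchAll is invoked directly. A small TypeScript illustration (the custom matcher is purely hypothetical and assumes a runtime that ships Symbol.matchAll):

    // Any object with a callable Symbol.matchAll takes over the whole call.
    const custom = {
      [Symbol.matchAll](subject: string) {
        return [`custom matcher saw: ${subject}`][Symbol.iterator]();
      },
    };
    console.log([..."abc".matchAll(custom as any)]);  // ["custom matcher saw: abc"]
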
class StringPadAssembler : public StringBuiltinsAssembler {
public:
explicit StringPadAssembler(compiler::CodeAssemblerState* state)
@@ -1640,9 +1661,9 @@ TF_BUILTIN(StringPrototypePadStart, StringPadAssembler) {
// ES6 #sec-string.prototype.search
TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
}
@@ -1719,23 +1740,22 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, separator, receiver, isolate()->factory()->split_symbol(),
- [=]() {
- return CallBuiltin(Builtins::kRegExpSplit, context, separator, receiver,
- limit);
+ [&]() {
+ args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context,
+ separator, receiver, limit));
},
- [=](Node* fn) {
+ [&](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, separator, receiver, limit);
- },
- &args);
+ args.PopAndReturn(
+ CallJS(call_callable, context, fn, separator, receiver, limit));
+ });
// String and integer conversions.
Node* const subject_string = ToString_Inline(context, receiver);
- Node* const limit_number =
- Select(IsUndefined(limit), [=]() { return NumberConstant(kMaxUInt32); },
- [=]() { return ToUint32(context, limit); },
- MachineRepresentation::kTagged);
+ TNode<Number> const limit_number = Select<Number>(
+ IsUndefined(limit), [=] { return NumberConstant(kMaxUInt32); },
+ [=] { return ToUint32(context, limit); });
Node* const separator_string = ToString_Inline(context, separator);
// Shortcut for {limit} == 0.
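
For reference, the limit handling typed above follows the usual split contract: undefined means effectively unlimited (kMaxUInt32), anything else goes through ToUint32, and a zero limit short-circuits to an empty result. In TypeScript terms, with illustrative values:

    console.log("a,b,c".split(","));     // ["a", "b", "c"]  (no limit)
    console.log("a,b,c".split(",", 2));  // ["a", "b"]
    console.log("a,b,c".split(",", 0));  // []  (the limit == 0 shortcut)
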
@@ -1923,7 +1943,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
{
TNode<Smi> const zero = SmiConstant(0);
var_result =
- SelectTaggedConstant(SmiLessThan(value_smi, zero), zero, limit);
+ SelectConstant<Smi>(SmiLessThan(value_smi, zero), zero, limit);
Goto(&out);
}
}
@@ -1936,8 +1956,8 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
TNode<Float64T> const float_zero = Float64Constant(0.);
TNode<Smi> const smi_zero = SmiConstant(0);
TNode<Float64T> const value_float = LoadHeapNumberValue(value_int_hn);
- var_result = SelectTaggedConstant(Float64LessThan(value_float, float_zero),
- smi_zero, limit);
+ var_result = SelectConstant<Smi>(Float64LessThan(value_float, float_zero),
+ smi_zero, limit);
Goto(&out);
}
@@ -2305,7 +2325,7 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
{
UnicodeEncoding encoding = UnicodeEncoding::UTF16;
TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
- TNode<String> value = StringFromCodePoint(ch, encoding);
+ TNode<String> value = StringFromSingleCodePoint(ch, encoding);
var_value.Bind(value);
TNode<IntPtrT> length = LoadStringLengthAsWord(value);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
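
The surrounding hunk is part of String.prototype[Symbol.iterator]'s next(), which steps by whole code points (hence LoadSurrogatePairAt and StringFromSingleCodePoint). Observable behaviour, for reference (TypeScript, illustrative string):

    // Iterating a string yields full code points, so an astral character such
    // as U+1F600 is one element even though it spans two UTF-16 code units.
    const s = "a\u{1F600}b";
    console.log(s.length);       // 4 (UTF-16 code units)
    console.log([...s].length);  // 3 (code points)
    console.log([...s][1]);      // "\u{1F600}"
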
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 1bd5429fdb..4147b3fc0c 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -95,14 +95,13 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
//
// Contains fast paths for Smi and RegExp objects.
// Important: {regexp_call} may not contain any code that can call into JS.
- typedef std::function<Node*()> NodeFunction0;
- typedef std::function<Node*(Node* fn)> NodeFunction1;
+ typedef std::function<void()> NodeFunction0;
+ typedef std::function<void(Node* fn)> NodeFunction1;
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Node* const maybe_string,
Handle<Symbol> symbol,
const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call,
- CodeStubArguments* args = nullptr);
+ const NodeFunction1& generic_call);
};
class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index d2e447538d..854bb5e58a 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -330,7 +330,7 @@ inline bool ToUpperOverflows(uc32 character) {
}
template <class Converter>
-MUST_USE_RESULT static Object* ConvertCaseHelper(
+V8_WARN_UNUSED_RESULT static Object* ConvertCaseHelper(
Isolate* isolate, String* string, SeqString* result, int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
DisallowHeapAllocation no_gc;
@@ -427,7 +427,7 @@ MUST_USE_RESULT static Object* ConvertCaseHelper(
}
template <class Converter>
-MUST_USE_RESULT static Object* ConvertCase(
+V8_WARN_UNUSED_RESULT static Object* ConvertCase(
Handle<String> s, Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
s = String::Flatten(s);
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 77bb09d0f7..1343a293bd 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -11,27 +11,25 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 19.4 Symbol Objects
+// ES #sec-symbol-objects
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
+// ES #sec-symbol-constructor
BUILTIN(SymbolConstructor) {
HandleScope scope(isolate);
- Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.atOrUndefined(isolate, 1);
- if (!description->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
- Object::ToString(isolate, description));
- result->set_name(*description);
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Symbol> result = isolate->factory()->NewSymbol();
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
+ if (!description->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, description, Object::ToString(isolate, description));
+ result->set_name(*description);
+ }
+ return *result;
+ } else { // [[Construct]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->Symbol_string()));
}
- return *result;
-}
-
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
-BUILTIN(SymbolConstructor_ConstructStub) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor,
- isolate->factory()->Symbol_string()));
}
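
The rewritten builtin above folds the old SymbolConstructor_ConstructStub into one body by branching on new.target; the user-visible contract is unchanged, roughly (TypeScript sketch, illustrative description string):

    const s = Symbol("tag");    // [[Call]]: creates a fresh symbol
    console.log(s.toString());  // "Symbol(tag)"

    try {
      new (Symbol as any)("tag");  // [[Construct]]: Symbol is not a constructor
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
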
// ES6 section 19.4.2.1 Symbol.for.
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index 2a4f23b003..e5ac615cca 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -36,9 +36,7 @@ TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- ExternalArrayType type =
- isolate()->factory()->GetArrayTypeFromElementsKind(kind);
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(kind));
var_typed_map = HeapConstant(map);
});
@@ -630,40 +628,33 @@ void TypedArrayBuiltinsAssembler::ConstructByIterable(
element_size);
}
-TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
-
- // If NewTarget is undefined, throw a TypeError exception.
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* shared = LoadObjectField(target, JSFunction::kSharedFunctionInfoOffset);
- Node* name = LoadObjectField(shared, SharedFunctionInfo::kNameOffset);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ ThrowTypeError(context, MessageTemplate::kConstructAbstractClass,
+ "TypedArray");
}
-TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
- Label if_arg1isbuffer(this), if_arg1istypedarray(this),
- if_arg1isreceiver(this), if_arg1isnumber(this), done(this);
+// ES #sec-typedarray-constructors
+TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> arg1 = CAST(Parameter(Descriptor::kArg1));
+ TNode<Object> arg2 = CAST(Parameter(Descriptor::kArg2));
+ TNode<Object> arg3 = CAST(Parameter(Descriptor::kArg3));
- TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
- CSA_ASSERT(this, IsNotUndefined(new_target));
+ CSA_ASSERT(this, IsConstructor(target));
+ CSA_ASSERT(this, IsJSReceiver(new_target));
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
- TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
- TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ Label if_arg1isbuffer(this), if_arg1istypedarray(this),
+ if_arg1isreceiver(this), if_arg1isnumber(this), return_result(this);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
ConstructorBuiltinsAssembler constructor_assembler(this->state());
- TNode<JSTypedArray> holder = CAST(
+ TNode<JSTypedArray> result = CAST(
constructor_assembler.EmitFastNewObject(context, target, new_target));
TNode<Smi> element_size =
- SmiTag(GetTypedArrayElementSize(LoadElementsKind(holder)));
+ SmiTag(GetTypedArrayElementSize(LoadElementsKind(result)));
GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
GotoIf(IsJSArrayBuffer(arg1), &if_arg1isbuffer);
@@ -671,15 +662,23 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
GotoIf(IsJSReceiver(arg1), &if_arg1isreceiver);
Goto(&if_arg1isnumber);
+ // https://tc39.github.io/ecma262/#sec-typedarray-buffer-byteoffset-length
BIND(&if_arg1isbuffer);
- ConstructByArrayBuffer(context, holder, CAST(arg1), arg2, arg3, element_size);
- Goto(&done);
+ {
+ ConstructByArrayBuffer(context, result, CAST(arg1), arg2, arg3,
+ element_size);
+ Goto(&return_result);
+ }
+ // https://tc39.github.io/ecma262/#sec-typedarray-typedarray
BIND(&if_arg1istypedarray);
- TNode<JSTypedArray> typed_array = CAST(arg1);
- ConstructByTypedArray(context, holder, typed_array, element_size);
- Goto(&done);
+ {
+ TNode<JSTypedArray> typed_array = CAST(arg1);
+ ConstructByTypedArray(context, result, typed_array, element_size);
+ Goto(&return_result);
+ }
+ // https://tc39.github.io/ecma262/#sec-typedarray-object
BIND(&if_arg1isreceiver);
{
Label if_iteratorundefined(this), if_iteratornotcallable(this);
@@ -690,8 +689,8 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
GotoIfNot(IsCallable(iteratorFn), &if_iteratornotcallable);
- ConstructByIterable(context, holder, CAST(arg1), iteratorFn, element_size);
- Goto(&done);
+ ConstructByIterable(context, result, CAST(arg1), iteratorFn, element_size);
+ Goto(&return_result);
BIND(&if_iteratorundefined);
{
@@ -699,22 +698,61 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
TNode<Object> initial_length =
GetProperty(context, arg1, LengthStringConstant());
- ConstructByArrayLike(context, holder, array_like, initial_length,
+ ConstructByArrayLike(context, result, array_like, initial_length,
element_size);
- Goto(&done);
+ Goto(&return_result);
}
BIND(&if_iteratornotcallable);
{ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
}
- // First arg was a number or fell through and will be treated as a number.
+ // The first argument was a number or fell through and is treated as
+ // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
BIND(&if_arg1isnumber);
- ConstructByLength(context, holder, arg1, element_size);
- Goto(&done);
+ {
+ ConstructByLength(context, result, arg1, element_size);
+ Goto(&return_result);
+ }
- BIND(&done);
- args.PopAndReturn(holder);
+ BIND(&return_result);
+ Return(result);
+}
+
+TF_BUILTIN(TypedArrayConstructorLazyDeoptContinuation,
+ TypedArrayBuiltinsAssembler) {
+ Node* result = Parameter(Descriptor::kResult);
+ Return(result);
+}
+
+// ES #sec-typedarray-constructors
+TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* arg1 = args.GetOptionalArgumentValue(0);
+ Node* arg2 = args.GetOptionalArgumentValue(1);
+ Node* arg3 = args.GetOptionalArgumentValue(2);
+
+ // If NewTarget is undefined, throw a TypeError exception.
+ // All the TypedArray constructors have this as the first step:
+ // https://tc39.github.io/ecma262/#sec-typedarray-constructors
+ Label throwtypeerror(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &throwtypeerror);
+
+ Node* result = CallBuiltin(Builtins::kCreateTypedArray, context, target,
+ new_target, arg1, arg2, arg3);
+ args.PopAndReturn(result);
+
+ BIND(&throwtypeerror);
+ {
+ Node* name = CallRuntime(Runtime::kGetFunctionName, context, target);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+ }
}
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
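
The former TypedArrayConstructor_ConstructStub is now split into a thin trampoline (new.target check, then a call into CreateTypedArray) and the argument dispatch shown above. The dispatch order mirrors the ECMAScript overloads; a rough TypeScript classifier over the same cases (placeholder helper and ordering, not a V8 entry point):

    // Classify the first constructor argument the way CreateTypedArray
    // dispatches on it; names and exact order are illustrative.
    function classifyTypedArrayArg(arg1: unknown): string {
      if (arg1 instanceof ArrayBuffer) return "buffer [, byteOffset, length]";
      if (ArrayBuffer.isView(arg1) && !(arg1 instanceof DataView)) return "typed array";
      if (typeof arg1 === "object" && arg1 !== null) return "iterable / array-like";
      return "length";
    }

    console.log(classifyTypedArrayArg(new ArrayBuffer(8)));  // buffer [, byteOffset, length]
    console.log(classifyTypedArrayArg(new Uint8Array(4)));   // typed array
    console.log(classifyTypedArrayArg([1, 2, 3]));           // iterable / array-like
    console.log(classifyTypedArrayArg(16));                  // length
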
@@ -829,11 +867,10 @@ TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
var_constructor = default_constructor;
Node* map = LoadMap(exemplar);
GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
- Branch(IsSpeciesProtectorCellInvalid(), &slow, &done);
+ Branch(IsTypedArraySpeciesProtectorCellInvalid(), &slow, &done);
BIND(&slow);
- var_constructor =
- CAST(SpeciesConstructor(context, exemplar, default_constructor));
+ var_constructor = SpeciesConstructor(context, exemplar, default_constructor);
Goto(&done);
BIND(&done);
@@ -1260,8 +1297,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
[=] {
return SmiTag(ConvertToRelativeIndex(
context, end, SmiUntag(source_length)));
- },
- MachineRepresentation::kTagged);
+ });
// Create a result array by invoking TypedArraySpeciesCreate.
TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
@@ -1460,15 +1496,11 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
- IterationKind iteration_kind) {
+ IterationKind kind) {
Label throw_bad_receiver(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
-
- Node* map = LoadMap(receiver);
- Node* instance_type = LoadMapInstanceType(map);
- GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE),
- &throw_bad_receiver);
+ GotoIfNot(IsJSTypedArray(receiver), &throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
@@ -1476,8 +1508,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Label if_receiverisneutered(this, Label::kDeferred);
GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
- Return(CreateArrayIterator(receiver, map, instance_type, context,
- iteration_kind));
+ Return(CreateArrayIterator(context, receiver, kind));
BIND(&throw_bad_receiver);
ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
@@ -1486,7 +1517,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
-// ES6 #sec-%typedarray%.prototype.values
+// ES #sec-%typedarray%.prototype.values
TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1495,7 +1526,7 @@ TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
IterationKind::kValues);
}
-// ES6 #sec-%typedarray%.prototype.entries
+// ES #sec-%typedarray%.prototype.entries
TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1504,7 +1535,7 @@ TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
IterationKind::kEntries);
}
-// ES6 #sec-%typedarray%.prototype.keys
+// ES #sec-%typedarray%.prototype.keys
TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3493e776b6..35a77c7518 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,7 +8,7 @@
#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
namespace v8 {
@@ -79,8 +79,8 @@ class BuiltinArguments : public Arguments {
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define BUILTIN(name) \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
- Isolate* isolate); \
+ V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
\
V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
@@ -92,7 +92,7 @@ class BuiltinArguments : public Arguments {
return Builtin_Impl_##name(args, isolate); \
} \
\
- MUST_USE_RESULT Object* Builtin_##name( \
+ V8_WARN_UNUSED_RESULT Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
if (V8_UNLIKELY(FLAG_runtime_stats)) { \
@@ -102,8 +102,8 @@ class BuiltinArguments : public Arguments {
return Builtin_Impl_##name(args, isolate); \
} \
\
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
- Isolate* isolate)
+ V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate)
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ad1763a292..c348248fff 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -195,16 +195,15 @@ Address Builtins::CppEntryOf(int index) {
}
// static
-bool Builtins::IsBuiltin(Code* code) {
+bool Builtins::IsBuiltin(const Code* code) {
return Builtins::IsBuiltinId(code->builtin_index());
}
// static
-bool Builtins::IsOffHeapBuiltin(Code* code) {
+bool Builtins::IsEmbeddedBuiltin(const Code* code) {
#ifdef V8_EMBEDDED_BUILTINS
- return FLAG_stress_off_heap_code &&
- Builtins::IsBuiltinId(code->builtin_index()) &&
- Builtins::IsOffHeapSafe(code->builtin_index());
+ return Builtins::IsBuiltinId(code->builtin_index()) &&
+ Builtins::IsIsolateIndependent(code->builtin_index());
#else
return false;
#endif
@@ -213,6 +212,12 @@ bool Builtins::IsOffHeapBuiltin(Code* code) {
// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // We don't want to lazy-deserialize off-heap builtins.
+ if (Builtins::IsIsolateIndependent(index)) return false;
+#endif
+
// There are a couple of reasons that builtins can require eager-loading,
// i.e. deserialization at isolate creation instead of on-demand. For
// instance:
@@ -225,50 +230,41 @@ bool Builtins::IsLazy(int index) {
// TODO(wasm): Remove wasm builtins once immovability is no longer required.
switch (index) {
case kAbort: // Required by wasm.
- case kArrayFindLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFindLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- // https://crbug.com/v8/6786.
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- // https://crbug.com/v8/6786.
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
case kArrayFindIndexLoopEagerDeoptContinuation:
- // https://crbug.com/v8/6786.
case kArrayFindIndexLoopLazyDeoptContinuation:
- // https://crbug.com/v8/6786.
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayEveryLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArrayReduceRightLoopEagerDeoptContinuation:
case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
- case kCheckOptimizationMarker:
case kCompileLazy:
+ case kDebugBreakTrampoline:
case kDeserializeLazy:
case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
case kHandleApiCall:
case kIllegal:
+ case kInstantiateAsmJs:
case kInterpreterEnterBytecodeAdvance:
case kInterpreterEnterBytecodeDispatch:
case kInterpreterEntryTrampoline:
- case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kPromiseConstructorLazyDeoptContinuation: // crbug/v8/6786.
- case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kTypedArrayConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kProxyConstructor: // https://crbug.com/v8/6787.
+ case kPromiseConstructorLazyDeoptContinuation:
case kRecordWrite: // https://crbug.com/chromium/765301.
case kThrowWasmTrapDivByZero: // Required by wasm.
case kThrowWasmTrapDivUnrepresentable: // Required by wasm.
@@ -278,7 +274,9 @@ bool Builtins::IsLazy(int index) {
case kThrowWasmTrapMemOutOfBounds: // Required by wasm.
case kThrowWasmTrapRemByZero: // Required by wasm.
case kThrowWasmTrapUnreachable: // Required by wasm.
+ case kToBooleanLazyDeoptContinuation:
case kToNumber: // Required by wasm.
+ case kTypedArrayConstructorLazyDeoptContinuation:
case kWasmCompileLazy: // Required by wasm.
case kWasmStackGuard: // Required by wasm.
return false;
@@ -295,711 +293,24 @@ bool Builtins::IsIsolateIndependent(int index) {
switch (index) {
#ifdef DEBUG
case kAbortJS:
- case kAllocateHeapNumber:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
case kContinueToCodeStubBuiltin:
case kContinueToCodeStubBuiltinWithResult:
case kContinueToJavaScriptBuiltin:
case kContinueToJavaScriptBuiltinWithResult:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDivide:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kIncrement:
case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLoadField:
- case kLoadGlobalICInsideTypeofTrampoline:
case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
case kLoadIC_Slow:
- case kLoadICTrampoline:
- case kMapPrototypeEntries:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberPrototypeValueOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeValueOf:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromiseResolveTrampoline:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kReflectHas:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kReturnReceiver:
- case kSetPrototypeEntries:
- case kSetPrototypeGetSize:
- case kSetPrototypeValues:
case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
-#endif
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
- case kThrowWasmTrapDivByZero:
- case kThrowWasmTrapDivUnrepresentable:
- case kThrowWasmTrapFloatUnrepresentable:
- case kThrowWasmTrapFuncInvalid:
- case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
- case kThrowWasmTrapUnreachable:
- case kToInteger:
- case kTypedArrayConstructor:
case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
-#else
- case kAbortJS:
- case kAdd:
- case kAllocateHeapNumber:
- case kArrayEvery:
- case kArrayEveryLoopContinuation:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEach:
- case kArrayForEachLoopContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayFrom:
- case kArrayIncludes:
- case kArrayIndexOf:
- case kArrayIsArray:
- case kArrayMapLoopContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayOf:
- case kArrayPrototypeEntries:
- case kArrayPrototypeFind:
- case kArrayPrototypeFindIndex:
- case kArrayPrototypeKeys:
- case kArrayPrototypeSlice:
- case kArrayPrototypeValues:
- case kArrayReduce:
- case kArrayReduceLoopContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRight:
- case kArrayReduceRightLoopContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySome:
- case kArraySomeLoopContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kAsyncFromSyncIteratorPrototypeNext:
- case kAsyncFromSyncIteratorPrototypeReturn:
- case kAsyncFromSyncIteratorPrototypeThrow:
- case kAsyncFunctionAwaitFulfill:
- case kAsyncFunctionAwaitReject:
- case kAsyncFunctionPromiseCreate:
- case kAsyncFunctionPromiseRelease:
- case kAsyncGeneratorAwaitFulfill:
- case kAsyncGeneratorAwaitReject:
- case kAsyncGeneratorResumeNext:
- case kAsyncGeneratorReturnClosedFulfill:
- case kAsyncGeneratorReturnClosedReject:
- case kAsyncGeneratorReturnFulfill:
- case kAsyncGeneratorYieldFulfill:
- case kAsyncIteratorValueUnwrap:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
- case kCallProxy:
- case kConstructFunction:
- case kConstructProxy:
- case kContinueToCodeStubBuiltin:
- case kContinueToCodeStubBuiltinWithResult:
- case kContinueToJavaScriptBuiltin:
- case kContinueToJavaScriptBuiltinWithResult:
- case kCreateGeneratorObject:
- case kCreateIterResultObject:
- case kCreateRegExpLiteral:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDeleteProperty:
- case kDivide:
- case kEqual:
- case kFastConsoleAssert:
- case kFastNewClosure:
- case kFastNewFunctionContextEval:
- case kFastNewFunctionContextFunction:
- case kFastNewObject:
- case kFindOrderedHashMapEntry:
- case kForInEnumerate:
- case kForInFilter:
- case kFunctionPrototypeHasInstance:
- case kGeneratorPrototypeNext:
- case kGeneratorPrototypeReturn:
- case kGeneratorPrototypeThrow:
- case kGetSuperConstructor:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kGreaterThan:
- case kGreaterThanOrEqual:
- case kHasProperty:
- case kIncrement:
- case kInstanceOf:
- case kKeyedLoadIC_Megamorphic:
- case kKeyedLoadIC_PolymorphicName:
- case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
- case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLessThan:
- case kLessThanOrEqual:
- case kLoadField:
- case kLoadGlobalIC:
- case kLoadGlobalICInsideTypeof:
- case kLoadGlobalICInsideTypeofTrampoline:
- case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
- case kLoadIC:
- case kLoadIC_FunctionPrototype:
- case kLoadIC_Noninlined:
- case kLoadIC_Slow:
- case kLoadIC_StringLength:
- case kLoadIC_StringWrapperLength:
- case kLoadICTrampoline:
- case kLoadIC_Uninitialized:
- case kMapPrototypeEntries:
- case kMapPrototypeForEach:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNegate:
- case kNewArgumentsElements:
- case kNonNumberToNumber:
- case kNonNumberToNumeric:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberConstructor:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberParseFloat:
- case kNumberPrototypeValueOf:
- case kNumberToString:
- case kObjectConstructor:
- case kObjectConstructor_ConstructStub:
- case kObjectCreate:
- case kObjectIs:
- case kObjectKeys:
- case kObjectPrototypeHasOwnProperty:
- case kObjectPrototypeIsPrototypeOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeToString:
- case kObjectPrototypeValueOf:
- case kOrderedHashTableHealIndex:
- case kOrdinaryHasInstance:
- case kOrdinaryToPrimitive_Number:
- case kOrdinaryToPrimitive_String:
- case kPromiseAll:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseCatchFinally:
- case kPromiseConstructor:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseFulfillReactionJob:
- case kPromiseInternalConstructor:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromisePrototypeCatch:
- case kPromisePrototypeFinally:
- case kPromiseRace:
- case kPromiseReject:
- case kPromiseRejectReactionJob:
- case kPromiseResolve:
- case kPromiseResolveThenableJob:
- case kPromiseResolveTrampoline:
- case kPromiseThenFinally:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kProxyGetProperty:
- case kProxyHasProperty:
- case kProxySetProperty:
- case kRecordWrite:
- case kReflectHas:
- case kRegExpConstructor:
- case kRegExpPrototypeCompile:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeFlagsGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeReplace:
- case kRegExpPrototypeSearch:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeSplit:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kResolvePromise:
- case kReturnReceiver:
- case kRunMicrotasks:
- case kSameValue:
- case kSetPrototypeEntries:
- case kSetPrototypeForEach:
- case kSetPrototypeGetSize:
- case kSetPrototypeHas:
- case kSetPrototypeValues:
- case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStrictEqual:
- case kStringCodePointAtUTF16:
- case kStringCodePointAtUTF32:
- case kStringConstructor:
- case kStringEqual:
- case kStringGreaterThan:
- case kStringGreaterThanOrEqual:
- case kStringIndexOf:
- case kStringLessThan:
- case kStringLessThanOrEqual:
- case kStringPrototypeAnchor:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeCharCodeAt:
- case kStringPrototypeCodePointAt:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeFontcolor:
- case kStringPrototypeFontsize:
- case kStringPrototypeIncludes:
- case kStringPrototypeIndexOf:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeLink:
- case kStringPrototypeMatch:
- case kStringPrototypePadEnd:
- case kStringPrototypePadStart:
- case kStringPrototypeRepeat:
- case kStringPrototypeReplace:
- case kStringPrototypeSearch:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
- case kStringToLowerCaseIntl:
-#endif
- case kStringPrototypeToString:
- case kStringPrototypeValueOf:
- case kStringRepeat:
- case kStringToNumber:
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
- case kThrowWasmTrapDivByZero:
- case kThrowWasmTrapDivUnrepresentable:
- case kThrowWasmTrapFloatUnrepresentable:
- case kThrowWasmTrapFuncInvalid:
- case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
case kThrowWasmTrapUnreachable:
- case kToBoolean:
- case kToBooleanLazyDeoptContinuation:
- case kToInteger:
- case kToInteger_TruncateMinusZero:
- case kToName:
- case kToNumber:
- case kToNumeric:
- case kToString:
- case kTypedArrayConstructor:
- case kTypedArrayConstructor_ConstructStub:
- case kTypedArrayPrototypeByteLength:
- case kTypedArrayPrototypeByteOffset:
- case kTypedArrayPrototypeEntries:
- case kTypedArrayPrototypeEvery:
- case kTypedArrayPrototypeFind:
- case kTypedArrayPrototypeFindIndex:
- case kTypedArrayPrototypeForEach:
- case kTypedArrayPrototypeKeys:
- case kTypedArrayPrototypeLength:
- case kTypedArrayPrototypeReduce:
- case kTypedArrayPrototypeReduceRight:
- case kTypedArrayPrototypeSet:
- case kTypedArrayPrototypeSlice:
- case kTypedArrayPrototypeSome:
- case kTypedArrayPrototypeSubArray:
- case kTypedArrayPrototypeToStringTag:
- case kTypedArrayPrototypeValues:
- case kTypeof:
- case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapLookupHashIndex:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
-#endif
- return true;
- default:
- return false;
- }
- UNREACHABLE();
-}
-
-// static
-bool Builtins::IsOffHeapSafe(int index) {
-#ifndef V8_EMBEDDED_BUILTINS
- return false;
-#else
- DCHECK(IsBuiltinId(index));
- if (IsTooShortForOffHeapTrampoline(index)) return false;
- switch (index) {
-#ifdef DEBUG
- case kAbortJS:
- case kAllocateHeapNumber:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
- case kContinueToCodeStubBuiltin:
- case kContinueToCodeStubBuiltinWithResult:
- case kContinueToJavaScriptBuiltin:
- case kContinueToJavaScriptBuiltinWithResult:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDivide:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kIncrement:
- case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
- case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLoadField:
- case kLoadGlobalICInsideTypeofTrampoline:
- case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
- case kLoadIC_Slow:
- case kLoadICTrampoline:
- case kMapPrototypeEntries:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberPrototypeValueOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeValueOf:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromiseResolveTrampoline:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kReflectHas:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kReturnReceiver:
- case kSetPrototypeEntries:
- case kSetPrototypeGetSize:
- case kSetPrototypeValues:
- case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
-#endif
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapMemOutOfBounds:
case kThrowWasmTrapDivByZero:
case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapRemByZero:
case kThrowWasmTrapFloatUnrepresentable:
case kThrowWasmTrapFuncInvalid:
case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
- case kThrowWasmTrapUnreachable:
- case kToInteger:
- case kTypedArrayConstructor:
- case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
#else
case kAbortJS:
case kAdd:
@@ -1068,9 +379,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kBitwiseNot:
case kBooleanPrototypeToString:
case kBooleanPrototypeValueOf:
- case kCallProxy:
- case kConstructFunction:
- case kConstructProxy:
case kContinueToCodeStubBuiltin:
case kContinueToCodeStubBuiltinWithResult:
case kContinueToJavaScriptBuiltin:
@@ -1140,6 +448,8 @@ bool Builtins::IsOffHeapSafe(int index) {
case kLoadIC_FunctionPrototype:
case kLoadIC_Noninlined:
case kLoadIC_Slow:
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
case kLoadICTrampoline:
case kLoadIC_Uninitialized:
case kMapPrototypeEntries:
@@ -1149,36 +459,15 @@ bool Builtins::IsOffHeapSafe(int index) {
case kMapPrototypeHas:
case kMapPrototypeKeys:
case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
case kMathFloor:
case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
case kMathMax:
case kMathMin:
case kMathRound:
case kMathSign:
- case kMathSin:
- case kMathSinh:
case kMathSqrt:
- case kMathTan:
- case kMathTanh:
case kMathTrunc:
- case kModulus:
case kMultiply:
case kNegate:
case kNewArgumentsElements:
@@ -1187,7 +476,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kNonPrimitiveToPrimitive_Default:
case kNonPrimitiveToPrimitive_Number:
case kNonPrimitiveToPrimitive_String:
- case kNumberConstructor:
case kNumberIsFinite:
case kNumberIsInteger:
case kNumberIsNaN:
@@ -1196,7 +484,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kNumberPrototypeValueOf:
case kNumberToString:
case kObjectConstructor:
- case kObjectConstructor_ConstructStub:
case kObjectCreate:
case kObjectIs:
case kObjectKeys:
@@ -1209,7 +496,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kOrdinaryHasInstance:
case kOrdinaryToPrimitive_Number:
case kOrdinaryToPrimitive_String:
- case kPromiseAll:
case kPromiseCapabilityDefaultReject:
case kPromiseCapabilityDefaultResolve:
case kPromiseCatchFinally:
@@ -1230,11 +516,9 @@ bool Builtins::IsOffHeapSafe(int index) {
case kPromiseThenFinally:
case kPromiseThrowerFinally:
case kPromiseValueThunkFinally:
- case kProxyConstructor:
case kProxyGetProperty:
case kProxyHasProperty:
case kProxySetProperty:
- case kRecordWrite:
case kReflectHas:
case kRegExpConstructor:
case kRegExpPrototypeCompile:
@@ -1264,7 +548,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kStrictEqual:
case kStringCodePointAtUTF16:
case kStringCodePointAtUTF32:
- case kStringConstructor:
case kStringEqual:
case kStringGreaterThan:
case kStringGreaterThanOrEqual:
@@ -1325,7 +608,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kToNumeric:
case kToString:
case kTypedArrayConstructor:
- case kTypedArrayConstructor_ConstructStub:
case kTypedArrayPrototypeByteLength:
case kTypedArrayPrototypeByteOffset:
case kTypedArrayPrototypeEntries:
@@ -1359,19 +641,33 @@ bool Builtins::IsOffHeapSafe(int index) {
return false;
}
UNREACHABLE();
-#endif // V8_EMBEDDED_BUILTINS
}
+#ifdef V8_EMBEDDED_BUILTINS
// static
-bool Builtins::IsTooShortForOffHeapTrampoline(int index) {
- switch (index) {
- case kLoadIC_StringLength:
- case kLoadIC_StringWrapperLength:
- return true;
- default:
- return false;
+Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
+ Address off_heap_entry) {
+ DCHECK(isolate->serializer_enabled());
+ DCHECK_NOT_NULL(isolate->embedded_blob());
+ DCHECK_NE(0, isolate->embedded_blob_size());
+
+ constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+
+ // Generate replacement code that simply tail-calls the off-heap code.
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(!masm.has_frame());
+ {
+ FrameScope scope(&masm, StackFrame::NONE);
+ masm.JumpToInstructionStream(off_heap_entry);
}
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+
+ return isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
}
+#endif // V8_EMBEDDED_BUILTINS
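
For readers unfamiliar with the embedded-builtins scheme, the new GenerateOffHeapTrampolineFor above assembles a tiny on-heap code object whose only job is to jump to code living outside the managed heap. The following standalone C++ sketch is an editor's illustration of that idea only; the Trampoline struct and the fake builtins are invented for this sketch and are not V8 types or API.

// Editor's sketch (not part of the diff): an on-heap "trampoline" table whose
// entries store an off-heap entry address and simply forward calls to it,
// mirroring masm.JumpToInstructionStream(off_heap_entry) above.
#include <cstdio>
#include <vector>

using OffHeapEntry = int (*)(int);  // stands in for an address in the embedded blob

static int AddOne(int x) { return x + 1; }  // pretend off-heap builtin 0
static int Square(int x) { return x * x; }  // pretend off-heap builtin 1

struct Trampoline {
  OffHeapEntry entry;
  int operator()(int arg) const { return entry(arg); }  // tail-call the off-heap code
};

int main() {
  std::vector<Trampoline> builtins = {{AddOne}, {Square}};
  std::printf("%d %d\n", builtins[0](41), builtins[1](9));  // 42 81
  return 0;
}
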
// static
Builtins::Kind Builtins::KindOf(int index) {
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index bf96469d19..7745420366 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -72,7 +72,7 @@ class Builtins {
Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> JSConstructStubGeneric();
- // Used by BuiltinDeserializer.
+ // Used by BuiltinDeserializer and CreateOffHeapTrampolines in isolate.cc.
void set_builtin(int index, HeapObject* builtin);
Code* builtin(int index) {
@@ -111,10 +111,10 @@ class Builtins {
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
- static bool IsBuiltin(Code* code);
+ static bool IsBuiltin(const Code* code);
- // True, iff the given code object is a builtin with off-heap code.
- static bool IsOffHeapBuiltin(Code* code);
+ // True, iff the given code object is a builtin with off-heap embedded code.
+ static bool IsEmbeddedBuiltin(const Code* code);
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
@@ -125,17 +125,6 @@ class Builtins {
// TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
static bool IsIsolateIndependent(int index);
- // This is the condition we currently use to determine whether a builtin is
- // copied off-heap when --stress-off-heap-code is passed. Such builtins do not
- // need to be isolate-independent, e.g. they can contain external references
- // that point to one specific isolate. A further restrictions is that there
- // must be enough space for the trampoline.
- static bool IsOffHeapSafe(int index);
-
- // The off-heap trampoline is short but requires a certain minimal instruction
- // size. This function states whether a given builtin is too short.
- static bool IsTooShortForOffHeapTrampoline(int index);
-
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
@@ -144,7 +133,7 @@ class Builtins {
initialized_ = true;
}
- MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> InvokeApiFunction(
Isolate* isolate, bool is_construct, Handle<HeapObject> function,
Handle<Object> receiver, int argc, Handle<Object> args[],
Handle<HeapObject> new_target);
@@ -160,6 +149,14 @@ class Builtins {
private:
Builtins();
+#ifdef V8_EMBEDDED_BUILTINS
+ // Creates a trampoline code object that jumps to the given off-heap entry.
+ // The result should not be used directly, but only from the related Factory
+ // function.
+ static Handle<Code> GenerateOffHeapTrampolineFor(Isolate* isolate,
+ Address off_heap_entry);
+#endif
+
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode);
@@ -198,6 +195,7 @@ class Builtins {
Object* builtins_[builtin_count];
bool initialized_;
+ friend class Factory; // For GenerateOffHeapTrampolineFor.
friend class Isolate;
friend class SetupIsolateDelegate;
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index a4117bd5a2..c044a348da 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -32,6 +32,8 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// Not yet finalized.
DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
+
+ DCHECK(isolate_->serializer_enabled());
#endif
uint32_t* maybe_key = map_.Find(object);
@@ -49,6 +51,7 @@ void BuiltinsConstantsTableBuilder::Finalize() {
DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
+ DCHECK(isolate_->serializer_enabled());
DCHECK_LT(0, map_.size());
Handle<FixedArray> table =
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
index d251d5849b..aefad8c3bb 100644
--- a/deps/v8/src/builtins/constants-table-builder.h
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -24,8 +24,8 @@ class BuiltinsConstantsTableBuilder final {
public:
explicit BuiltinsConstantsTableBuilder(Isolate* isolate);
- // Returns the index within the builtins constants list for the given object,
- // possibly adding the object to the cache. Objects are deduplicated.
+ // Returns the index within the builtins constants table for the given
+ // object, possibly adding the object to the table. Objects are deduplicated.
uint32_t AddObject(Handle<Object> object);
// Should be called after all affected code (e.g. builtins and bytecode
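
The reworded comment above describes AddObject as a deduplicating lookup: a given object always maps to the same constants-table index. A minimal sketch of that behaviour follows, using std::unordered_map over strings instead of V8's identity map over heap objects; the class and member names are illustrative only.

// Editor's sketch (not part of the diff): deduplicating index assignment.
#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

class ConstantsTableSketch {
 public:
  // Returns the index for |object|, inserting it only on first use.
  uint32_t AddObject(const std::string& object) {
    auto it = map_.find(object);
    if (it != map_.end()) return it->second;  // already in the table
    uint32_t index = static_cast<uint32_t>(table_.size());
    table_.push_back(object);
    map_.emplace(object, index);
    return index;
  }

 private:
  std::unordered_map<std::string, uint32_t> map_;
  std::vector<std::string> table_;
};

int main() {
  ConstantsTableSketch builder;
  std::printf("%u %u %u\n", builder.AddObject("undefined"),
              builder.AddObject("empty_string"),
              builder.AddObject("undefined"));  // prints 0 1 0
  return 0;
}
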
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index 3a155e26f9..eae9ff5594 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -90,8 +90,8 @@ TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- TNode<FixedArray> to_array = CAST(ExtractFixedArray(
- from_array, nullptr, element_count, new_capacity, flags));
+ TNode<FixedArray> to_array = ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags);
return to_array;
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 5bc083f531..2afcebab80 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -217,7 +217,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
@@ -338,7 +338,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -382,9 +382,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
return Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -497,6 +494,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
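
The GetSharedFunctionInfoBytecode helper added above unwraps one level of indirection: when the SharedFunctionInfo's function_data is an InterpreterData, the bytecode array is stored one field deeper. A hedged C++ restatement of that unwrap, with invented struct names standing in for the heap objects:

// Editor's sketch (not part of the diff); the types are stand-ins, not V8 API.
#include <cstdio>
#include <variant>

struct BytecodeArray { int length; };
struct InterpreterData { BytecodeArray bytecode_array; /* plus a trampoline */ };

using FunctionData = std::variant<BytecodeArray, InterpreterData>;

// Mirrors GetSharedFunctionInfoBytecode: if function_data is an
// InterpreterData, load its bytecode array; otherwise the data already is the
// bytecode array.
BytecodeArray GetBytecode(const FunctionData& sfi_data) {
  if (const auto* interp = std::get_if<InterpreterData>(&sfi_data)) {
    return interp->bytecode_array;
  }
  return std::get<BytecodeArray>(sfi_data);
}

int main() {
  FunctionData plain = BytecodeArray{10};
  FunctionData wrapped = InterpreterData{BytecodeArray{20}};
  std::printf("%d %d\n", GetBytecode(plain).length, GetBytecode(wrapped).length);
  return 0;
}
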
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -571,6 +581,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ GetSharedFunctionInfoBytecode(masm, ecx, eax);
+ __ Pop(eax);
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
}
@@ -673,7 +686,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = edi;
Register optimized_code_entry = scratch;
@@ -682,9 +695,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is an optimization marker.
@@ -719,12 +732,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
+
+ __ LoadWeakValue(optimized_code_entry, &fallthrough);
- __ mov(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
__ push(eax);
__ push(edx);
@@ -858,10 +870,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
+ apply_instrumentation;
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
+ __ Pop(eax);
__ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
&maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -978,15 +994,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
- __ push(ebx); // feedback_vector == ebx, so save it.
- __ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
- __ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
- __ SmiUntag(ebx);
- __ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(ecx, FieldOperand(eax, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, ecx);
+ __ mov(ecx, FieldOperand(eax, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ecx);
+ __ and_(ecx, Immediate(DebugInfo::kDebugExecutionMode));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ cmp(ecx, Operand::StaticVariable(debug_execution_mode));
+ __ j(equal, &bytecode_array_loaded);
+
+ __ pop(ecx); // get JSFunction from stack
+ __ push(ecx);
+ __ push(ebx); // preserve feedback_vector and bytecode array register
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(ecx); // pass function as argument
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ pop(ebx);
- __ j(zero, &bytecode_array_loaded);
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
}
@@ -1013,6 +1043,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -1055,11 +1086,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1203,15 +1230,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(ebx);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ // Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
-
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with unmodified eax, edi, edx values.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1238,10 +1262,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ __ CmpObjectType(ebx, INTERPRETER_DATA_TYPE, eax);
+ __ j(not_equal, &builtin_trampoline, Label::kNear);
+
+ __ mov(ebx, FieldOperand(ebx, InterpreterData::kInterpreterTrampolineOffset));
+ __ jmp(&trampoline_loaded, Label::kNear);
+
+ __ bind(&builtin_trampoline);
__ Move(ebx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
+ __ Pop(eax);
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ push(ebx);
@@ -1313,42 +1355,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
- Register closure = edi;
-
- // Get the feedback vector.
- Register feedback_vector = ebx;
- __ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(ecx, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ mov(FieldOperand(edi, JSFunction::kCodeOffset), ecx);
__ RecordWriteField(edi, JSFunction::kCodeOffset, ecx, ebx, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1356,6 +1365,77 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Immediate(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi unnecessarily.
+ STATIC_ASSERT(times_2 == times_pointer_size - kSmiTagSize);
+ __ mov(sfi_data, Operand(scratch1, sfi_data, times_2, 0));
+ __ jmp(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ mov(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
+ __ mov(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
+ __ j(not_equal, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ jmp(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpw(data_type, Immediate(CODE_TYPE));
+ __ j(equal, &done);
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
+ __ j(not_equal, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ jmp(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpw(data_type, Immediate(TUPLE2_TYPE));
+ __ j(not_equal, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ jmp(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ j(not_equal, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ jmp(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
+ __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ mov(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
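
The new GetSharedFunctionInfoCode helper picks the code object to run purely from the dynamic type of SharedFunctionInfo::function_data. The sketch below restates that dispatch as a plain C++ table; the enum names and strings are an editor's summary of the checks above, not real V8 identifiers.

// Editor's sketch (not part of the diff): the type -> code-object mapping.
#include <cstdio>

enum class FunctionDataKind {
  kSmi,                   // builtin id        -> builtins table entry
  kBytecodeArray,         // interpreted       -> InterpreterEntryTrampoline
  kCode,                  // already code      -> that code object
  kFixedArray,            // AsmWasmData       -> InstantiateAsmJs
  kPreParsedScopeData,    // TUPLE2            -> CompileLazy
  kFunctionTemplateInfo,  // API function      -> HandleApiCall
  kInterpreterData,       // debug/interpreter -> stored trampoline
};

const char* CodeObjectFor(FunctionDataKind kind) {
  switch (kind) {
    case FunctionDataKind::kSmi:                  return "builtins[smi]";
    case FunctionDataKind::kBytecodeArray:        return "InterpreterEntryTrampoline";
    case FunctionDataKind::kCode:                 return "sfi_data itself";
    case FunctionDataKind::kFixedArray:           return "InstantiateAsmJs";
    case FunctionDataKind::kPreParsedScopeData:   return "CompileLazy";
    case FunctionDataKind::kFunctionTemplateInfo: return "HandleApiCall";
    case FunctionDataKind::kInterpreterData:      return "InterpreterData trampoline";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", CodeObjectFor(FunctionDataKind::kBytecodeArray));
  return 0;
}
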
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1378,12 +1458,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = ecx;
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, ebx);
- // If SFI points to anything other than CompileLazy, install that.
- __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(ebx, masm->CodeObject());
__ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime);
@@ -1451,26 +1532,13 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
- __ mov(shared, FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ mov(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
- target_builtin);
+ __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
__ push(eax); // Write barrier clobbers these below.
__ push(target_builtin);
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, target_builtin,
- eax, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(target_builtin); // eax is popped later, shared is now available.
-
- // And second to the target function.
-
- __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
- __ push(target_builtin); // Write barrier clobbers these below.
__ RecordWriteField(target, JSFunction::kCodeOffset, target_builtin, eax,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ pop(target_builtin);
@@ -1845,9 +1913,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code;
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1863,6 +1928,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1870,15 +1936,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
+ // -- edi : array function
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
- __ mov(edx, edi);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1889,9 +1952,17 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- // Run the native code for the Array function called as a normal function.
- // tail call a stub
+ // ebx is the AllocationSite - here undefined.
__ mov(ebx, masm->isolate()->factory()->undefined_value());
+ // If edx (new target) is undefined, then this is the 'Call' case, so move
+  // edi (the constructor) to edx.
+ Label call;
+ __ cmp(edx, ebx);
+ __ j(not_equal, &call);
+ __ mov(edx, edi);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -2115,7 +2186,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -2125,7 +2196,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
@@ -2368,18 +2439,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
+ __ AssertConstructor(edi);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
// ebx to contain either an AllocationSite or undefined.
__ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
+ __ test(FieldOperand(ecx, SharedFunctionInfo::kFlagsOffset),
+ Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ j(zero, &call_generic_stub, Label::kNear);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2389,6 +2469,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
+ __ AssertConstructor(edi);
__ AssertBoundFunction(edi);
// Push the [[BoundArguments]] onto the stack.
@@ -2421,16 +2502,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET);
-
// Check if target has a [[Construct]] internal method.
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
+ // Dispatch based on instance type.
+ __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
@@ -2680,10 +2762,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = esi; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr Register gp_regs[]{eax, ebx, ecx, edx, esi};
+ constexpr Register gp_regs[]{eax, ebx, ecx, edx};
constexpr XMMRegister xmm_regs[]{xmm1, xmm2, xmm3, xmm4, xmm5, xmm6};
for (auto reg : gp_regs) {
@@ -2694,12 +2778,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(Operand(esp, 16 * i), xmm_regs[i]);
}
- // Initialize rsi register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(esi, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in edi.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ // The entrypoint address is the first return value.
+ __ mov(edi, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
for (int i = arraysize(xmm_regs) - 1; i >= 0; --i) {
@@ -2710,7 +2798,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Pop(gp_regs[i]);
}
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ jmp(edi);
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 0d42834612..34faac0969 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -82,19 +82,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,9 +90,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -120,6 +104,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -127,29 +112,33 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
+ // -- a1 : array function
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
+ __ GetObjectType(a2, t1, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
Operand(MAP_TYPE));
}
- // Run the native code for the Array function called as a normal function.
- // Tail call a stub.
- __ mov(a3, a1);
+ // a2 is the AllocationSite - here undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ // If a3 (new target) is undefined, then this is the 'Call' case, so move
+ // a1 (the constructor) to a3.
+ Label call;
+ __ Branch(&call, ne, a3, Operand(a2));
+ __ mov(a3, a1);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -280,7 +269,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
@@ -401,7 +390,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -444,9 +433,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -562,6 +548,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ lw(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -637,6 +636,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
@@ -737,7 +737,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
@@ -746,9 +746,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak cell to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -782,12 +782,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ lw(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -919,6 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
__ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(t0, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1041,12 +1040,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ lw(t1, FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(t1, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, t1);
__ lw(t1, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
__ SmiUntag(t1);
- __ And(t1, t1, Operand(DebugInfo::kHasBreakInfo));
- __ Branch(&bytecode_array_loaded, eq, t1, Operand(zero_reg));
- __ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
+ __ And(t1, t1, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ li(t0, Operand(debug_execution_mode));
+ __ lb(t0, MemOperand(t0));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ Branch(&bytecode_array_loaded, eq, t0, Operand(t1));
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ Branch(&bytecode_array_loaded);
}
@@ -1090,6 +1107,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1118,11 +1136,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1165,14 +1179,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(a2, t0);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1194,10 +1207,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ li(t0, Operand(BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)));
+
+ __ bind(&trampoline_loaded);
__ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1272,43 +1303,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = a1;
-
- // Get the feedback vector.
- Register feedback_vector = a2;
- __ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
- Operand(at));
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(a2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ sw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, t0, kRAHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1316,6 +1313,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ li(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi.
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ __ Lsa(scratch1, scratch1, sfi_data, kPointerSizeLog2 - kSmiTagSize);
+ __ lw(sfi_data, MemOperand(scratch1));
+ __ Branch(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ lw(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Branch(&check_is_code, ne, data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Branch(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ Branch(&done, eq, data_type, Operand(CODE_TYPE));
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
+ Operand(FIXED_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ Branch(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ Branch(&check_is_function_template_info, ne, data_type,
+ Operand(TUPLE2_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ Branch(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ Branch(&check_is_interpreter_data, ne, data_type,
+ Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
+ Operand(INTERPRETER_DATA_TYPE));
+ }
+ __ lw(sfi_data, FieldMemOperand(
+ sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
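
Both the ia32 and mips versions of GetSharedFunctionInfoCode index the builtins table with the tagged Smi directly: since a Smi is the value shifted left by kSmiTagSize, scaling it by the remaining kPointerSizeLog2 - kSmiTagSize bits gives the same byte offset as untagging first. A small arithmetic check of that identity, assuming the 32-bit Smi layout (kSmiTagSize == 1, 4-byte pointers); this is an editor's verification sketch, not part of the diff.

// Editor's sketch: base + (smi << (log2(ptr) - tag)) == base + value * sizeof(ptr).
#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;       // assumed 32-bit layout: smi == value << 1
  const int kPointerSizeLog2 = 2;  // 4-byte pointers on ia32 / mips32

  for (int32_t value = 0; value < 1000; ++value) {
    int32_t smi = value << kSmiTagSize;                        // tagged form
    int32_t untagged_offset = value << kPointerSizeLog2;       // untag, then scale
    int32_t tagged_offset = smi << (kPointerSizeLog2 - kSmiTagSize);  // Lsa / times_2
    assert(untagged_offset == tagged_offset);
  }
  return 0;
}
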
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1338,12 +1405,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = t0;
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, t1);
- // If SFI points to anything other than CompileLazy, install that.
- __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(t1, masm->CodeObject());
__ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
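Put differently, Generate_CompileLazy no longer reads SharedFunctionInfo::kCodeOffset; once the optimized-code fast path above has been tried, it derives the code from function_data and only falls back to the runtime when that inferred code is still the CompileLazy builtin itself. A hedged pseudo-C++ outline of that control flow, with stand-in types that are not V8's:

    struct Code { int builtin_id = -1; };
    struct JSFunction { Code* code = nullptr; };

    constexpr int kCompileLazyBuiltinId = 0;  // stand-in for the CompileLazy builtin

    // Stubs for the steps performed by the generated code above.
    Code* InferCodeFromFunctionData(JSFunction*) { return nullptr; }
    void CallRuntimeCompileLazy(JSFunction*) {}
    void TailCall(Code*) {}

    void CompileLazySketch(JSFunction* fn) {
      Code* entry = InferCodeFromFunctionData(fn);  // GetSharedFunctionInfoCode
      if (entry == nullptr || entry->builtin_id == kCompileLazyBuiltinId) {
        CallRuntimeCompileLazy(fn);                 // "gotta_call_runtime" path
        return;
      }
      fn->code = entry;  // store to JSFunction::kCodeOffset (+ write barrier)
      TailCall(entry);
    }
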
@@ -1411,25 +1479,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ lw(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
- CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
-
- __ sw(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ sw(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(t3, target_builtin); // Write barrier clobbers t3 below.
@@ -2006,7 +2058,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2016,7 +2068,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
@@ -2238,17 +2290,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- a1 : the constructor to call (checked to be a JSFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertFunction(a1);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset));
+ __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
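The construct-stub dispatch now hinges on a single flag bit instead of a per-SFI construct stub pointer. A minimal sketch of the test, assuming an illustrative bit position (the real one is defined by SharedFunctionInfo::ConstructAsBuiltinBit):

    #include <cstdint>

    // Assumed position, for illustration only.
    constexpr uint32_t kConstructAsBuiltinMask = uint32_t{1} << 4;

    // true  -> tail-call JSBuiltinsConstructStub
    // false -> tail-call JSConstructStubGeneric
    bool ConstructAsBuiltin(uint32_t sfi_flags) {
      return (sfi_flags & kConstructAsBuiltinMask) != 0;
    }
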
// static
@@ -2258,6 +2320,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into t0.
@@ -2352,16 +2415,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(a1, &non_constructor);
- // Dispatch based on instance type.
- __ GetObjectType(a1, t1, t2);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-
// Check if target has a [[Construct]] internal method.
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+ // Dispatch based on instance type.
+ __ lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2566,24 +2630,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = a0; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<a0, a1, a2, a3>();
+ constexpr RegList gp_regs = Register::ListOf<a1, a2, a3>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
- __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Finally, jump to the entrypoint.
+ __ Jump(at, v0, 0);
}
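WasmCompileLazy now threads the instance register through the runtime call and jumps straight to the returned entrypoint rather than to the header-adjusted start of a Code object. A hypothetical sketch of the contract assumed here (not the actual runtime signature):

    // Both values come back from Runtime::kWasmCompileLazy: the entrypoint in
    // the first return register (v0) and the instance in the second.
    struct WasmLazyCompileResult {
      void* entrypoint;     // raw instruction address the stub jumps to
      void* wasm_instance;  // handed back so the instance register is restored
    };

    WasmLazyCompileResult RunWasmCompileLazy(void* wasm_instance) {
      // Placeholder body; the real work happens in the runtime.
      return {nullptr, wasm_instance};
    }
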
#undef __
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 15fdfc3d7d..7756872b14 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -82,19 +82,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,9 +90,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -120,6 +104,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -127,29 +112,33 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
+ // -- a1 : array function
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
- __ GetObjectType(a2, a3, a4);
+ __ GetObjectType(a2, t0, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
Operand(MAP_TYPE));
}
- // Run the native code for the Array function called as a normal function.
- // Tail call a stub.
- __ mov(a3, a1);
+ // a2 is the AllocationSite - here undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ // If a3 (new target) is undefined, then this is the 'Call' case, so move
+ // a1 (the constructor) to a3.
+ Label call;
+ __ Branch(&call, ne, a3, Operand(a2));
+ __ mov(a3, a1);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
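Generate_ArrayConstructor now receives the Array function in a1 from its caller and only has to normalize new.target before tail-calling ArrayConstructorStub. A small sketch of that normalization, using stand-in types rather than V8's:

    struct ArrayStubArgs {
      void* constructor;      // a1
      void* new_target;       // a3
      void* allocation_site;  // a2
    };

    void PrepareArrayStubArgs(ArrayStubArgs& args, void* undefined_value) {
      args.allocation_site = undefined_value;    // no AllocationSite here
      if (args.new_target == undefined_value) {  // plain 'Call' of Array(...)
        args.new_target = args.constructor;      // a3 <- a1
      }
    }
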
@@ -280,7 +269,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
@@ -401,7 +390,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -445,13 +434,23 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
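GetSharedFunctionInfoBytecode exists because function_data can now be either a BytecodeArray directly or an InterpreterData that wraps one. A minimal C++ analogue with hypothetical types:

    struct BytecodeArray {};
    struct InterpreterData { BytecodeArray* bytecode_array; };

    // 'is_interpreter_data' stands in for the INTERPRETER_DATA_TYPE check above.
    BytecodeArray* UnwrapBytecode(void* function_data, bool is_interpreter_data) {
      if (is_interpreter_data) {
        return static_cast<InterpreterData*>(function_data)->bytecode_array;
      }
      return static_cast<BytecodeArray*>(function_data);
    }
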
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -526,6 +525,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
@@ -734,7 +734,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
@@ -743,9 +743,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -779,12 +779,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ Ld(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
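The optimized-code slot of the feedback vector is now either a Smi optimization marker or a weak reference to a Code object cleared directly by the GC, rather than a WeakCell. A sketch of the classification, assuming V8's usual one-bit Smi tag (an assumption of this sketch, not something the diff states):

    #include <cstdint>

    enum class SlotKind { kOptimizationMarker, kWeakCodeRef };

    SlotKind ClassifyOptimizedCodeSlot(uintptr_t slot_value) {
      constexpr uintptr_t kSmiTagMask = 1;  // assumed: Smis have a clear low bit
      return (slot_value & kSmiTagMask) == 0 ? SlotKind::kOptimizationMarker
                                             : SlotKind::kWeakCodeRef;
    }
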
@@ -916,6 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
__ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1039,12 +1038,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ Ld(a5, FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(a5, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, a5);
__ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
__ SmiUntag(a5);
- __ And(a5, a5, Operand(DebugInfo::kHasBreakInfo));
- __ Branch(&bytecode_array_loaded, eq, a5, Operand(zero_reg));
- __ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ And(a5, a5, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ li(a4, Operand(debug_execution_mode));
+ __ Lb(a4, MemOperand(a4));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ Branch(&bytecode_array_loaded, eq, a4, Operand(a5));
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ Branch(&bytecode_array_loaded);
}
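The debug-bytecode path now keys off DebugInfo::kDebugExecutionMode rather than kHasBreakInfo: the debug copy is installed whenever it exists, and Runtime::kDebugApplyInstrumentation is only called when the isolate's current debug execution mode differs from the one the DebugInfo was prepared for. A compact sketch of that decision, with illustrative types:

    enum class DebugExecMode { kBreakpoints, kSideEffects };

    // Returns true when the bytecode needs to be re-instrumented via
    // Runtime::kDebugApplyInstrumentation before entering the interpreter.
    bool NeedsReinstrumentation(bool has_debug_bytecode,
                                DebugExecMode debug_info_mode,
                                DebugExecMode isolate_mode) {
      if (!has_debug_bytecode) return false;   // keep the original bytecode array
      return debug_info_mode != isolate_mode;  // re-instrument only on a mismatch
    }
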
@@ -1087,6 +1104,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1115,11 +1133,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1162,15 +1176,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(a2, t0);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1192,10 +1204,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ li(t0, Operand(BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)));
+
+ __ bind(&trampoline_loaded);
__ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
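Generate_InterpreterEnterBytecode now picks the interpreter entry trampoline from the SFI's InterpreterData when present, falling back to the builtin, and then rebuilds the return address from the stored pc offset. A sketch with stand-in types (the real offset also folds in Code::kHeaderSize - kHeapObjectTag):

    struct CodeStub { char* instruction_start; };

    char* InterpreterReturnAddress(CodeStub* interpreter_data_trampoline,
                                   CodeStub* builtin_trampoline,
                                   int return_pc_offset) {
      CodeStub* trampoline = interpreter_data_trampoline != nullptr
                                 ? interpreter_data_trampoline
                                 : builtin_trampoline;
      return trampoline->instruction_start + return_pc_offset;
    }
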
@@ -1270,43 +1300,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = a1;
-
- // Get the feedback vector.
- Register feedback_vector = a2;
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
- Operand(at));
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1314,6 +1310,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ li(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi by merging the shift
+ STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
+ __ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
+ __ Daddu(scratch1, scratch1, sfi_data);
+ __ Ld(sfi_data, MemOperand(scratch1));
+ __ Branch(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ Ld(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ Lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Branch(&check_is_code, ne, data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Branch(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ Branch(&done, eq, data_type, Operand(CODE_TYPE));
+
+  // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
+ Operand(FIXED_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ Branch(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ Branch(&check_is_function_template_info, ne, data_type,
+ Operand(TUPLE2_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ Branch(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ Branch(&check_is_interpreter_data, ne, data_type,
+ Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
+ Operand(INTERPRETER_DATA_TYPE));
+ }
+ __ Ld(sfi_data, FieldMemOperand(
+ sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1336,12 +1402,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, t1);
- // If SFI points to anything other than CompileLazy, install that.
- __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
__ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
@@ -1410,25 +1476,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ Ld(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
- CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
-
- __ Sd(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ Sd(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(t3, target_builtin); // Write barrier clobbers t3 below.
@@ -2031,7 +2081,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2041,7 +2091,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
@@ -2259,18 +2309,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- a1 : the constructor to call (checked to be a JSFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertFunction(a1);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2280,6 +2339,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into a4.
@@ -2372,16 +2432,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(a1, &non_constructor);
- // Dispatch based on instance type.
- __ GetObjectType(a1, t1, t2);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-
// Check if target has a [[Construct]] internal method.
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+ // Dispatch based on instance type.
+ __ Lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2588,26 +2649,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = a0; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7>();
+ constexpr RegList gp_regs = Register::ListOf<a1, a2, a3, a4, a5, a6, a7>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ // Finally, jump to the entrypoint.
+ __ Jump(v0);
}
#undef __
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 9206920d45..2989d69a4b 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -81,19 +81,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the current native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -102,9 +89,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r4);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +102,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,27 +110,32 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
+ // -- r4 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r4);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ CompareObjectType(r5, r7, r8, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // r5 is the AllocationSite - here undefined.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ // If r6 (new target) is undefined, then this is the 'Call' case, so move
+ // r4 (the constructor) to r6.
+ Label call;
+ __ cmp(r6, r5);
+ __ bne(&call);
__ mr(r6, r4);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -286,7 +276,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
@@ -413,7 +403,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Throw if constructor function is a class constructor
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver, cr0);
@@ -458,13 +448,22 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ bne(&done);
+ __ LoadP(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -545,7 +544,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r6, r3);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -751,7 +752,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r4;
Register optimized_code_entry = scratch1;
@@ -761,9 +762,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -798,12 +799,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -825,7 +824,6 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, r5);
__ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -941,6 +939,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
__ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
__ TestIfSmi(r7, r0);
__ bne(&maybe_load_debug_bytecode_array, cr0);
@@ -1070,15 +1069,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
- Label done;
__ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mr(kInterpreterBytecodeArrayRegister, ip);
__ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
__ SmiUntag(ip);
- __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
- __ beq(&done, cr0);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
- __ bind(&done);
+ __ andi(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r7, Operand(debug_execution_mode));
+ __ lwz(r7, MemOperand(r7));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ cmp(r7, ip);
+ __ beq(&bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
__ b(&bytecode_array_loaded);
}
@@ -1117,6 +1128,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1146,11 +1158,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1196,16 +1204,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r4);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1227,10 +1232,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ bne(&builtin_trampoline);
+
+ __ LoadP(r5,
+ FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r5, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ mtlr(r0);
@@ -1304,43 +1328,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r4;
-
- // Get the feedback vector.
- Register feedback_vector = r5;
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r5, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ RecordWriteField(r4, JSFunction::kCodeOffset, r5, r7, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1348,6 +1338,79 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ SmiUntag(sfi_data, LeaveRC, kPointerSizeLog2);
+ __ LoadPX(sfi_data, MemOperand(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ LoadHalfWord(data_type,
+ FieldMemOperand(data_type, Map::kInstanceTypeOffset), r0);
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpi(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ bne(&check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpi(data_type, Operand(CODE_TYPE));
+ __ beq(&done);
+
+  // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ cmpi(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ bne(&check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpi(data_type, Operand(TUPLE2_TYPE));
+ __ bne(&check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpi(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ bne(&check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpi(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ LoadP(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1371,13 +1434,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r7;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r8);
- // If SFI points to anything other than CompileLazy, install that.
- __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ mov(r8, Operand(masm->CodeObject()));
__ cmp(entry, r8);
__ beq(&gotta_call_runtime);
@@ -1447,25 +1512,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ LoadP(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
- CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
-
- __ StoreP(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset), r0);
- __ mr(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r8,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset),
r0);
@@ -2073,7 +2122,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
@@ -2325,18 +2374,28 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r4 : the constructor to call (checked to be a JSFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r4);
__ AssertFunction(r4);
// Calling convention for function specific ConstructStubs require
// r5 to contain either an AllocationSite or undefined.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
- __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
+ __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ and_(r7, r7, ip, SetRC);
+ __ beq(&call_generic_stub, cr0);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2346,6 +2405,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r4);
__ AssertBoundFunction(r4);
// Push the [[BoundArguments]] onto the stack.
@@ -2378,16 +2438,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r4, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2592,28 +2653,33 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r10; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<r3, r4, r5, r6, r7, r8, r9, r10>();
+ constexpr RegList gp_regs = Register::ListOf<r3, r4, r5, r6, r7, r8, r9>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<d1, d2, d3, d4, d5, d6, d7, d8>();
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r11.
- __ addi(r11, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ mr(r11, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mr(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(r11);
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 86239e0052..bf7229ac69 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -81,13 +81,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the current native context.
@@ -102,9 +95,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r3);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +108,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,6 +116,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
+ // -- r3 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -138,14 +130,21 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+ __ CompareObjectType(r4, r6, r7, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // r4 is the AllocationSite - here undefined.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ // If r5 (new target) is undefined, then this is the 'Call' case, so move
+ // r3 (the constructor) to r5.
+ Label call;
+ __ CmpP(r5, r4);
+ __ bne(&call);
__ LoadRR(r5, r3);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
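The Array constructor entry now receives the Array function in a register instead of reloading it from the native context, and it normalizes the 'Call' case itself before tail-calling the stub; the same pattern appears again in the x64 hunk further down. A hedged sketch of how the stub's inputs are prepared, with illustrative names only:

// Illustrative sketch, not V8 code.
void PrepareArrayConstructorCall(const void* constructor, const void* undefined,
                                 const void** new_target, const void** allocation_site) {
  *allocation_site = undefined;      // r4 / rbx: no AllocationSite available here
  if (*new_target == undefined) {    // plain call, e.g. Array(3)
    *new_target = constructor;       // r5 / rdx := the Array function itself
  }                                  // 'new Array(3)' keeps new_target untouched
}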
@@ -280,8 +279,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r6,
- FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver);
@@ -406,8 +404,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Throw if constructor function is a class constructor
__ LoadP(r6, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r6,
- FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver);
} else {
@@ -449,13 +446,22 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ bne(&done, Label::kNear);
+ __ LoadP(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -540,7 +546,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r5, r1);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
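GetSharedFunctionInfoBytecode (added above for s390, and again for x64 later in this patch) accounts for SharedFunctionInfo::kFunctionDataOffset now possibly holding an InterpreterData wrapper rather than the BytecodeArray itself. A hedged C++ sketch of the unwrapping; the types below are stand-ins, not V8 declarations:

// Sketch only.
struct BytecodeArray {};
struct InterpreterData {
  BytecodeArray* bytecode_array;  // what kBytecodeArrayOffset points at
};

BytecodeArray* UnwrapBytecode(void* function_data, bool is_interpreter_data) {
  if (is_interpreter_data) {  // the CompareObjectType check in the builtin
    return static_cast<InterpreterData*>(function_data)->bytecode_array;
  }
  return static_cast<BytecodeArray*>(function_data);  // already a BytecodeArray
}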
@@ -754,7 +762,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r3;
Register optimized_code_entry = scratch1;
@@ -764,9 +772,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -801,12 +809,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
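The feedback vector's optimized-code slot is no longer a WeakCell but an in-place weak reference, so the extra dereference disappears and LoadWeakValue both unwraps the reference and branches to the fall-through path once it has been cleared. Roughly, under that assumption and with made-up names:

// Hedged sketch of the new read of the optimized-code slot.
bool TryGetOptimizedCode(const void* slot_value, const void* cleared_sentinel,
                         const void** code_out) {
  if (slot_value == cleared_sentinel) return false;  // -> 'fallthrough' label
  *code_out = slot_value;                            // strong pointer to the Code
  return true;
}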
@@ -944,6 +950,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
__ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
__ TestIfSmi(r6);
__ bne(&maybe_load_debug_bytecode_array);
@@ -1069,15 +1076,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
- Label done;
__ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ LoadRR(kInterpreterBytecodeArrayRegister, ip);
__ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
__ SmiUntag(ip);
- __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
- __ beq(&done);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
- __ bind(&done);
+ __ AndP(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r6, Operand(debug_execution_mode));
+ __ LoadW(r6, MemOperand(r6));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ CmpP(r6, ip);
+ __ beq(&bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
__ b(&bytecode_array_loaded);
}
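The debug-bytecode path above no longer keys off kHasBreakInfo alone: it compares the execution mode recorded in the DebugInfo flags against the isolate-wide debug execution mode and calls Runtime::kDebugApplyInstrumentation only when they differ (the x64 port repeats the same logic below). A hedged sketch of the condition; the masks come from the patch, the helper name is made up:

// Sketch only.
bool NeedsReinstrumentation(int debug_info_flags, int debug_execution_mode_mask,
                            int isolate_debug_execution_mode) {
  // DebugInfo::kDebugExecutionMode aliases DebugInfo::kSideEffects, so a plain
  // equality compare of the two mode values is sufficient.
  int recorded_mode = debug_info_flags & debug_execution_mode_mask;
  return recorded_mode != isolate_debug_execution_mode;
}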
@@ -1118,6 +1137,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1145,11 +1165,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1195,16 +1211,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r4, r7);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r3);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r2, r3, and r5 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1226,10 +1239,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ bne(&builtin_trampoline);
+
+ __ LoadP(r4,
+ FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r4, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
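Before computing the return address, Generate_InterpreterEnterBytecode now checks whether the function's SFI data is an InterpreterData; if so, the interpreter trampoline stored there is used, otherwise the shared InterpreterEntryTrampoline from the builtins list. A hedged sketch of the selection, with stand-in types rather than V8 declarations:

// Sketch only.
struct Code {};
struct InterpreterData { Code* interpreter_trampoline; };  // kInterpreterTrampolineOffset

Code* SelectTrampoline(void* sfi_function_data, bool is_interpreter_data,
                       Code* builtin_interpreter_entry_trampoline) {
  if (is_interpreter_data) {  // per-function interpreter trampoline
    return static_cast<InterpreterData*>(sfi_function_data)->interpreter_trampoline;
  }
  return builtin_interpreter_entry_trampoline;  // the shared builtin
}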
@@ -1301,43 +1333,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r3;
-
- // Get the feedback vector.
- Register feedback_vector = r4;
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r4, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1345,6 +1343,79 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ SmiUntag(sfi_data, kPointerSizeLog2);
+ __ LoadP(sfi_data, MemOperand(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ LoadHalfWordP(data_type,
+ FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ CmpP(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ bne(&check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ CmpP(data_type, Operand(CODE_TYPE));
+ __ beq(&done);
+
+  // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ CmpP(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ bne(&check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ CmpP(data_type, Operand(TUPLE2_TYPE));
+ __ bne(&check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ CmpP(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ bne(&check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ CmpP(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ LoadP(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1368,13 +1439,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r7);
- // If SFI points to anything other than CompileLazy, install that.
- __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ mov(r7, Operand(masm->CodeObject()));
__ CmpP(entry, r7);
__ beq(&gotta_call_runtime);
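CompileLazy no longer reads a code pointer out of the SharedFunctionInfo; GetSharedFunctionInfoCode above (duplicated for x64 further down) derives the code object to install from whatever the function_data slot holds. An illustrative summary of that mapping; the enum and strings below are hypothetical, only the right-hand builtins are from the patch:

// Sketch only, not V8 code.
enum class FunctionDataKind { kBuiltinId, kBytecodeArray, kCode, kAsmWasmData,
                              kPreParsedScopeData, kFunctionTemplateInfo, kInterpreterData };

const char* CodeObjectFor(FunctionDataKind kind) {
  switch (kind) {
    case FunctionDataKind::kBuiltinId:            return "builtins[Smi value]";
    case FunctionDataKind::kBytecodeArray:        return "InterpreterEntryTrampoline";
    case FunctionDataKind::kCode:                 return "the Code object itself";
    case FunctionDataKind::kAsmWasmData:          return "InstantiateAsmJs";
    case FunctionDataKind::kPreParsedScopeData:   return "CompileLazy";
    case FunctionDataKind::kFunctionTemplateInfo: return "HandleApiCall";
    case FunctionDataKind::kInterpreterData:      return "InterpreterData::interpreter_trampoline";
  }
  return nullptr;
}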
@@ -1444,25 +1517,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ LoadP(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r7 != target && r7 != scratch0 && r7 != scratch1);
- CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
-
- __ StoreP(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r8, r7,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
@@ -2070,7 +2127,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
@@ -2324,18 +2381,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r3 : the constructor to call (checked to be a JSFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r3, r1);
__ AssertFunction(r3);
// Calling convention for function specific ConstructStubs require
// r4 to contain either an AllocationSite or undefined.
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
- __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
+ __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ beq(&call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
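ConstructFunction no longer jumps through SharedFunctionInfo::kConstructStubOffset; it chooses between exactly two stubs based on a single flag bit. A minimal sketch of that test, assuming only the mask name taken from the patch:

// Hedged sketch, not V8 code: the flag test that selects the construct stub.
bool UseBuiltinsConstructStub(uint32_t sfi_flags, uint32_t construct_as_builtin_mask) {
  // true  -> JSBuiltinsConstructStub
  // false -> JSConstructStubGeneric
  return (sfi_flags & construct_as_builtin_mask) != 0;
}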
// static
@@ -2345,6 +2411,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r3, r1);
__ AssertBoundFunction(r3);
// Push the [[BoundArguments]] onto the stack.
@@ -2377,16 +2444,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r3, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r4, Map::IsConstructorBit::kShift);
__ beq(&non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r6, r7, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2590,6 +2658,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r6; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
@@ -2602,18 +2672,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in ip.
- __ AddP(ip, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ LoadRR(ip, r2);
+ // The WASM instance is the second return value.
+ __ LoadRR(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(ip);
}
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index d30cd02ab5..45040844c3 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -114,8 +114,9 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
- Code::BUILTIN, name, builtin_index);
+ compiler::CodeAssemblerState state(
+ isolate, &zone, argc_with_recv, Code::BUILTIN, name,
+ PoisoningMitigationLevel::kOff, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -141,7 +142,8 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(isolate, &zone, descriptor, Code::BUILTIN,
- name, result_size, 0, builtin_index);
+ name, PoisoningMitigationLevel::kOff,
+ result_size, 0, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -186,7 +188,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_address(new_target->instruction_start(),
+ rinfo->set_target_address(new_target->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
@@ -202,8 +204,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
}
}
}
@@ -283,17 +285,6 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
#undef SET_EXCEPTION_CAUGHT_PREDICTION
- // TODO(mstarzinger,6792): This code-space modification section should be
- // moved into {Heap} eventually and a safe wrapper be provided.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-
-#define SET_CODE_NON_TAGGED_PARAMS(Name) \
- Code::cast(builtins->builtins_[Builtins::k##Name]) \
- ->set_has_tagged_params(false);
-
- BUILTINS_WITH_UNTAGGED_PARAMS(SET_CODE_NON_TAGGED_PARAMS)
-#undef SET_CODE_NON_TAGGED_PARAMS
-
builtins->MarkInitialized();
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index ae7e0c151e..ad64b02af9 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -221,7 +221,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
@@ -341,7 +341,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -385,9 +385,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
return Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -562,6 +559,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
+ __ j(not_equal, &done, Label::kNear);
+ __ movp(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -639,6 +649,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
}
@@ -745,7 +756,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = rdi;
Register optimized_code_entry = scratch1;
@@ -754,9 +765,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -790,12 +801,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ movp(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -876,7 +885,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#define JUMP_IF_EQUAL(NAME) \
__ cmpb(bytecode, \
Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
- __ j(equal, if_return, Label::kNear);
+ __ j(equal, if_return, Label::kFar);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
@@ -927,6 +936,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
+ kScratchRegister);
__ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
&maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1047,13 +1058,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiToInteger32(kScratchRegister,
- FieldOperand(rcx, DebugInfo::kFlagsOffset));
- __ testl(kScratchRegister, Immediate(DebugInfo::kHasBreakInfo));
- __ j(zero, &bytecode_array_loaded);
- __ movp(kInterpreterBytecodeArrayRegister,
+ __ movp(kScratchRegister,
FieldOperand(rcx, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(kScratchRegister, Heap::kUndefinedValueRootIndex,
+ &bytecode_array_loaded);
+
+ __ movp(kInterpreterBytecodeArrayRegister, kScratchRegister);
+ __ SmiToInteger32(rax, FieldOperand(rcx, DebugInfo::kFlagsOffset));
+ __ andb(rax, Immediate(DebugInfo::kDebugExecutionMode));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode_address =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ Operand debug_execution_mode =
+ masm->ExternalOperand(debug_execution_mode_address);
+ __ cmpb(rax, debug_execution_mode);
+ __ j(equal, &bytecode_array_loaded);
+
+ __ Push(closure);
+ __ Push(feedback_vector);
+ __ Push(kInterpreterBytecodeArrayRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(kInterpreterBytecodeArrayRegister);
+ __ Pop(feedback_vector);
+ __ Pop(closure);
__ jmp(&bytecode_array_loaded);
}
@@ -1082,6 +1113,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -1118,10 +1150,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1176,16 +1205,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(rbx);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ // Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(rdi);
-
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
// Jump to the constructor function (rax, rbx, rdx passed on).
- __ jmp(rcx);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor (rax, rdx, rdi passed on).
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1208,12 +1234,30 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
- // TODO(jgruber,v8:6666): Update logic once builtin is off-heap-safe.
- DCHECK(!Builtins::IsOffHeapSafe(Builtins::kInterpreterEntryTrampoline));
+ Label builtin_trampoline, trampoline_loaded;
+ // TODO(jgruber,v8:6666): Update logic once builtin is isolate-independent.
+ DCHECK(
+ !Builtins::IsIsolateIndependent(Builtins::kInterpreterEntryTrampoline));
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
+ __ j(not_equal, &builtin_trampoline, Label::kNear);
+
+ __ movp(rbx,
+ FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
+ __ jmp(&trampoline_loaded, Label::kNear);
+
+ __ bind(&builtin_trampoline);
__ Move(rbx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ Push(rbx);
@@ -1286,52 +1330,89 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
- Register closure = rdi;
-
- // Get the feedback vector.
- Register feedback_vector = rbx;
- __ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
- __ jmp(rcx);
-}
-
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
// builtin does not set the code field in the JS function. If there isn't then
// we do not need this builtin and can jump directly to CompileLazy.
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ movq(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(rcx, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ movp(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
__ RecordWriteField(rdi, JSFunction::kCodeOffset, rcx, r15, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Jump to compile lazy.
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
+ SmiIndex index = masm->SmiToIndex(sfi_data, sfi_data, kPointerSizeLog2);
+ __ movp(sfi_data, Operand(scratch1, index.reg, index.scale, 0));
+ __ j(always, &done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ movp(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
+ __ movw(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
+ __ j(not_equal, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ j(always, &done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpw(data_type, Immediate(CODE_TYPE));
+ __ j(equal, &done);
+
+  // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
+ __ j(not_equal, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ j(always, &done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpw(data_type, Immediate(TUPLE2_TYPE));
+ __ j(not_equal, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ j(always, &done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ j(not_equal, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ j(always, &done);
+
+ // IsInterpreterData: Interpret bytecode with unique interpreter
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
+ __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ movp(
+ sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1354,12 +1435,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = rcx;
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, rbx);
- // If SFI points to anything other than CompileLazy, install that.
- __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(rbx, masm->CodeObject());
__ cmpp(entry, rbx);
__ j(equal, &gotta_call_runtime);
@@ -1427,24 +1509,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ movp(shared,
- FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r14 != target && r14 != scratch0 && r14 != scratch1);
- CHECK(r15 != target && r15 != scratch0 && r15 != scratch1);
-
- __ movp(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
- target_builtin);
- __ movp(r14, target_builtin); // Write barrier clobbers r14 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r14, r15,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ movp(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
__ movp(r14, target_builtin); // Write barrier clobbers r14 below.
@@ -1830,9 +1897,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code;
- // Get the InternalArray function.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1848,6 +1912,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1855,14 +1920,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
+ // -- rdi : array function
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rdi);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1874,10 +1937,17 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // rbx is the AllocationSite - here undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ // If rdx (new target) is undefined, then this is the 'Call' case, so move
+ // rdi (the constructor) to rdx.
+ Label call;
+ __ cmpp(rdx, rbx);
+ __ j(not_equal, &call);
__ movp(rdx, rdi);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -2218,7 +2288,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -2234,7 +2304,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
@@ -2479,18 +2549,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
+ __ AssertConstructor(rdi);
__ AssertFunction(rdi);
// Calling convention for function specific ConstructStubs require
// rbx to contain either an AllocationSite or undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
- __ jmp(rcx);
+ __ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
+ Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ j(zero, &call_generic_stub, Label::kNear);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2500,6 +2579,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
+ __ AssertConstructor(rdi);
__ AssertBoundFunction(rdi);
// Push the [[BoundArguments]] onto the stack.
@@ -2534,16 +2614,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(rdi, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET);
-
// Check if target has a [[Construct]] internal method.
+ __ movq(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
+ // Dispatch based on instance type.
+ __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
+ __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
@@ -2636,10 +2717,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = rsi; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr Register gp_regs[]{rax, rbx, rcx, rdx, rsi, rdi};
+ constexpr Register gp_regs[]{rax, rbx, rcx, rdx, rdi};
constexpr XMMRegister xmm_regs[]{xmm1, xmm2, xmm3, xmm4, xmm5, xmm6};
for (auto reg : gp_regs) {
@@ -2650,12 +2733,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(Operand(rsp, 16 * i), xmm_regs[i]);
}
- // Initialize rsi register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(rsi, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r11.
- __ leap(r11, FieldOperand(rax, Code::kHeaderSize));
+ // The entrypoint address is the first return value.
+ __ movq(r11, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ movq(wasm_instance_reg, kReturnRegister1);
// Restore registers.
for (int i = arraysize(xmm_regs) - 1; i >= 0; --i) {
@@ -2666,7 +2753,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Pop(gp_regs[i]);
}
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ jmp(r11);
}