aboutsummaryrefslogtreecommitdiff
path: root/deps/v8/src/execution
diff options
context:
space:
mode:
authorMyles Borins <mylesborins@google.com>2019-09-24 11:56:38 -0400
committerMyles Borins <myles.borins@gmail.com>2019-10-07 03:19:23 -0400
commitf7f6c928c1c9c136b7926f892b8a2fda11d8b4b2 (patch)
treef5edbccb3ffda2573d70a6e291e7157f290e0ae0 /deps/v8/src/execution
parentffd22e81983056d09c064c59343a0e488236272d (diff)
downloadandroid-node-v8-f7f6c928c1c9c136b7926f892b8a2fda11d8b4b2.tar.gz
android-node-v8-f7f6c928c1c9c136b7926f892b8a2fda11d8b4b2.tar.bz2
android-node-v8-f7f6c928c1c9c136b7926f892b8a2fda11d8b4b2.zip
deps: update V8 to 7.8.279.9
PR-URL: https://github.com/nodejs/node/pull/29694 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Anna Henningsen <anna@addaleax.net> Reviewed-By: Gus Caplan <me@gus.host> Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Michaël Zasso <targos@protonmail.com> Reviewed-By: Tobias Nießen <tniessen@tnie.de> Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Diffstat (limited to 'deps/v8/src/execution')
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc84
-rw-r--r--deps/v8/src/execution/arm64/pointer-auth-arm64.cc269
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc41
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h39
-rw-r--r--deps/v8/src/execution/frames.cc178
-rw-r--r--deps/v8/src/execution/frames.h170
-rw-r--r--deps/v8/src/execution/futex-emulation.cc24
-rw-r--r--deps/v8/src/execution/futex-emulation.h8
-rw-r--r--deps/v8/src/execution/interrupts-scope.cc2
-rw-r--r--deps/v8/src/execution/interrupts-scope.h6
-rw-r--r--deps/v8/src/execution/isolate-data.h35
-rw-r--r--deps/v8/src/execution/isolate-inl.h27
-rw-r--r--deps/v8/src/execution/isolate.cc170
-rw-r--r--deps/v8/src/execution/isolate.h46
-rw-r--r--deps/v8/src/execution/messages.cc246
-rw-r--r--deps/v8/src/execution/messages.h18
-rw-r--r--deps/v8/src/execution/microtask-queue.cc17
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc11
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc11
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc27
-rw-r--r--deps/v8/src/execution/protectors-inl.h36
-rw-r--r--deps/v8/src/execution/protectors.cc48
-rw-r--r--deps/v8/src/execution/protectors.h42
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc12
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc23
-rw-r--r--deps/v8/src/execution/stack-guard.cc62
-rw-r--r--deps/v8/src/execution/stack-guard.h34
-rw-r--r--deps/v8/src/execution/thread-local-top.cc10
-rw-r--r--deps/v8/src/execution/thread-local-top.h30
-rw-r--r--deps/v8/src/execution/v8threads.cc5
30 files changed, 1471 insertions, 260 deletions
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 0b3ebcf879..2677135096 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -1562,7 +1562,7 @@ using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
int32_t arg2, int32_t arg3,
int32_t arg4, int32_t arg5,
int32_t arg6, int32_t arg7,
- int32_t arg8);
+ int32_t arg8, int32_t arg9);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -1602,7 +1602,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t arg6 = stack_pointer[2];
int32_t arg7 = stack_pointer[3];
int32_t arg8 = stack_pointer[4];
- STATIC_ASSERT(kMaxCParameters == 9);
+ int32_t arg9 = stack_pointer[5];
+ STATIC_ASSERT(kMaxCParameters == 10);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -1761,9 +1762,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
+ "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
- arg3, arg4, arg5, arg6, arg7, arg8);
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1771,7 +1772,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
int64_t result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -4070,6 +4071,39 @@ void ShiftRightAndInsert(Simulator* simulator, int Vd, int Vm, int shift) {
simulator->set_neon_register<T, SIZE>(Vd, dst);
}
+template <typename T, typename S_T, int SIZE>
+void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
+ static const int kElems = SIZE / sizeof(T);
+ T src[kElems];
+ S_T shift[kElems];
+ simulator->get_neon_register<T, SIZE>(Vm, src);
+ simulator->get_neon_register<S_T, SIZE>(Vn, shift);
+ for (int i = 0; i < kElems; i++) {
+ // Take lowest 8 bits of shift value (see F6.1.217 of ARM Architecture
+ // Reference Manual ARMv8), as signed 8-bit value.
+ int8_t shift_value = static_cast<int8_t>(shift[i]);
+ int size = static_cast<int>(sizeof(T) * 8);
+ // When shift value is greater/equal than size, we end up relying on
+ // undefined behavior, handle that and emulate what the hardware does.
+ if ((shift_value) >= 0) {
+ // If the shift value is greater/equal than size, zero out the result.
+ if (shift_value >= size) {
+ src[i] = 0;
+ } else {
+ src[i] <<= shift_value;
+ }
+ } else {
+ // If the shift value is greater/equal than size, always end up with -1.
+ if (-shift_value >= size) {
+ src[i] = -1;
+ } else {
+ src[i] = ArithmeticShiftRight(src[i], -shift_value);
+ }
+ }
+ }
+ simulator->set_neon_register<T, SIZE>(Vd, src);
+}
+
template <typename T, int SIZE>
void CompareEqual(Simulator* simulator, int Vd, int Vm, int Vn) {
static const int kElems = SIZE / sizeof(T);
@@ -4255,6 +4289,25 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
+ case 0x4: {
+ // vshl s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
case 0x6: {
// vmin/vmax.s<size> Qd, Qm, Qn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
@@ -4644,6 +4697,27 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
+ case 0x4: {
+ // vshl s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm,
+ Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm,
+ Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
case 0x6: {
// vmin/vmax.u<size> Qd, Qm, Qn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
diff --git a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
new file mode 100644
index 0000000000..cb8ff2c740
--- /dev/null
+++ b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
@@ -0,0 +1,269 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/arm64/simulator-arm64.h"
+
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// Randomly generated example key for simulating only.
+const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
+ 0xab9fd4e14b2fec51, 0};
+
+namespace {
+
+uint64_t GetNibble(uint64_t in_data, int position) {
+ return (in_data >> position) & 0xf;
+}
+
+uint64_t PACCellShuffle(uint64_t in_data) {
+ static int in_positions[16] = {52, 24, 44, 0, 28, 48, 4, 40,
+ 32, 12, 56, 20, 8, 36, 16, 60};
+ uint64_t out_data = 0;
+ for (int i = 0; i < 16; ++i) {
+ out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
+ }
+ return out_data;
+}
+
+uint64_t PACCellInvShuffle(uint64_t in_data) {
+ static int in_positions[16] = {12, 24, 48, 36, 56, 44, 4, 16,
+ 32, 52, 28, 8, 20, 0, 40, 60};
+ uint64_t out_data = 0;
+ for (int i = 0; i < 16; ++i) {
+ out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
+ }
+ return out_data;
+}
+
+uint64_t RotCell(uint64_t in_cell, int amount) {
+ DCHECK((amount >= 1) && (amount <= 3));
+
+ in_cell &= 0xf;
+ uint8_t temp = in_cell << 4 | in_cell;
+ return static_cast<uint64_t>((temp >> (4 - amount)) & 0xf);
+}
+
+uint64_t PACMult(uint64_t s_input) {
+ uint8_t t0;
+ uint8_t t1;
+ uint8_t t2;
+ uint8_t t3;
+ uint64_t s_output = 0;
+
+ for (int i = 0; i < 4; ++i) {
+ uint8_t s12 = (s_input >> (4 * (i + 12))) & 0xf;
+ uint8_t s8 = (s_input >> (4 * (i + 8))) & 0xf;
+ uint8_t s4 = (s_input >> (4 * (i + 4))) & 0xf;
+ uint8_t s0 = (s_input >> (4 * (i + 0))) & 0xf;
+
+ t0 = RotCell(s8, 1) ^ RotCell(s4, 2) ^ RotCell(s0, 1);
+ t1 = RotCell(s12, 1) ^ RotCell(s4, 1) ^ RotCell(s0, 2);
+ t2 = RotCell(s12, 2) ^ RotCell(s8, 1) ^ RotCell(s0, 1);
+ t3 = RotCell(s12, 1) ^ RotCell(s8, 2) ^ RotCell(s4, 1);
+
+ s_output |= static_cast<uint64_t>(t3) << (4 * (i + 0));
+ s_output |= static_cast<uint64_t>(t2) << (4 * (i + 4));
+ s_output |= static_cast<uint64_t>(t1) << (4 * (i + 8));
+ s_output |= static_cast<uint64_t>(t0) << (4 * (i + 12));
+ }
+ return s_output;
+}
+
+uint64_t PACSub(uint64_t t_input) {
+ uint64_t t_output = 0;
+ uint8_t substitutions[16] = {0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe,
+ 0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa};
+ for (int i = 0; i < 16; ++i) {
+ unsigned index = ((t_input >> (4 * i)) & 0xf);
+ t_output |= static_cast<uint64_t>(substitutions[index]) << (4 * i);
+ }
+ return t_output;
+}
+
+uint64_t PACInvSub(uint64_t t_input) {
+ uint64_t t_output = 0;
+ uint8_t substitutions[16] = {0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9,
+ 0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3};
+ for (int i = 0; i < 16; ++i) {
+ unsigned index = ((t_input >> (4 * i)) & 0xf);
+ t_output |= static_cast<uint64_t>(substitutions[index]) << (4 * i);
+ }
+ return t_output;
+}
+
+uint64_t TweakCellInvRot(uint64_t in_cell) {
+ uint64_t out_cell = 0;
+ out_cell |= (in_cell & 0x7) << 1;
+ out_cell |= (in_cell & 0x1) ^ ((in_cell >> 3) & 0x1);
+ return out_cell;
+}
+
+uint64_t TweakInvShuffle(uint64_t in_data) {
+ uint64_t out_data = 0;
+ out_data |= TweakCellInvRot(in_data >> 48) << 0;
+ out_data |= ((in_data >> 52) & 0xf) << 4;
+ out_data |= ((in_data >> 20) & 0xff) << 8;
+ out_data |= ((in_data >> 0) & 0xff) << 16;
+ out_data |= TweakCellInvRot(in_data >> 8) << 24;
+ out_data |= ((in_data >> 12) & 0xf) << 28;
+ out_data |= TweakCellInvRot(in_data >> 28) << 32;
+ out_data |= TweakCellInvRot(in_data >> 60) << 36;
+ out_data |= TweakCellInvRot(in_data >> 56) << 40;
+ out_data |= TweakCellInvRot(in_data >> 16) << 44;
+ out_data |= ((in_data >> 32) & 0xfff) << 48;
+ out_data |= TweakCellInvRot(in_data >> 44) << 60;
+ return out_data;
+}
+
+uint64_t TweakCellRot(uint64_t in_cell) {
+ uint64_t out_cell = 0;
+ out_cell |= ((in_cell & 0x1) ^ ((in_cell >> 1) & 0x1)) << 3;
+ out_cell |= (in_cell >> 0x1) & 0x7;
+ return out_cell;
+}
+
+uint64_t TweakShuffle(uint64_t in_data) {
+ uint64_t out_data = 0;
+ out_data |= ((in_data >> 16) & 0xff) << 0;
+ out_data |= TweakCellRot(in_data >> 24) << 8;
+ out_data |= ((in_data >> 28) & 0xf) << 12;
+ out_data |= TweakCellRot(in_data >> 44) << 16;
+ out_data |= ((in_data >> 8) & 0xff) << 20;
+ out_data |= TweakCellRot(in_data >> 32) << 28;
+ out_data |= ((in_data >> 48) & 0xfff) << 32;
+ out_data |= TweakCellRot(in_data >> 60) << 44;
+ out_data |= TweakCellRot(in_data >> 0) << 48;
+ out_data |= ((in_data >> 4) & 0xf) << 52;
+ out_data |= TweakCellRot(in_data >> 40) << 56;
+ out_data |= TweakCellRot(in_data >> 36) << 60;
+ return out_data;
+}
+
+} // namespace
+
+// For a description of QARMA see:
+// The QARMA Block Cipher Family, Roberto Avanzi, Qualcomm Product Security
+// Initiative.
+// The pseudocode is available in ARM DDI 0487D.b, J1-6946.
+uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) {
+ uint64_t key0 = key.high;
+ uint64_t key1 = key.low;
+ const uint64_t RC[5] = {0x0000000000000000, 0x13198a2e03707344,
+ 0xa4093822299f31d0, 0x082efa98ec4e6c89,
+ 0x452821e638d01377};
+ const uint64_t Alpha = 0xc0ac29B7c97c50dd;
+
+ uint64_t modk0 = ((key0 & 0x1) << 63) | ((key0 >> 2) << 1) |
+ ((key0 >> 63) ^ ((key0 >> 1) & 0x1));
+ uint64_t running_mod = context;
+ uint64_t working_val = data ^ key0;
+ uint64_t round_key;
+ for (int i = 0; i < 5; ++i) {
+ round_key = key1 ^ running_mod;
+ working_val ^= round_key;
+ working_val ^= RC[i];
+ if (i > 0) {
+ working_val = PACCellShuffle(working_val);
+ working_val = PACMult(working_val);
+ }
+ working_val = PACSub(working_val);
+ running_mod = TweakShuffle(running_mod);
+ }
+
+ round_key = modk0 ^ running_mod;
+ working_val ^= round_key;
+ working_val = PACCellShuffle(working_val);
+ working_val = PACMult(working_val);
+ working_val = PACSub(working_val);
+ working_val = PACCellShuffle(working_val);
+ working_val = PACMult(working_val);
+ working_val ^= key1;
+ working_val = PACCellInvShuffle(working_val);
+ working_val = PACInvSub(working_val);
+ working_val = PACMult(working_val);
+ working_val = PACCellInvShuffle(working_val);
+ working_val ^= key0;
+ working_val ^= running_mod;
+
+ for (int i = 0; i < 5; ++i) {
+ working_val = PACInvSub(working_val);
+ if (i < 4) {
+ working_val = PACMult(working_val);
+ working_val = PACCellInvShuffle(working_val);
+ }
+ running_mod = TweakInvShuffle(running_mod);
+ round_key = key1 ^ running_mod;
+ working_val ^= RC[4 - i];
+ working_val ^= round_key;
+ working_val ^= Alpha;
+ }
+
+ return working_val ^ modk0;
+}
+
+// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without
+// codes, but is always 55 once a PAC code is added to a pointer. For this
+// reason, it must be calculated at the call site.
+uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) {
+ int bottom_pac_bit = GetBottomPACBit(ptr, ttbr);
+ int top_pac_bit = GetTopPACBit(ptr, type);
+ return unsigned_bitextract_64(top_pac_bit, bottom_pac_bit,
+ 0xffffffffffffffff & ~kTTBRMask)
+ << bottom_pac_bit;
+}
+
+uint64_t Simulator::AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
+ PointerType type) {
+ DCHECK((key.number == 0) || (key.number == 1));
+
+ uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
+ uint64_t original_ptr =
+ ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+
+ uint64_t pac = ComputePAC(original_ptr, context, key);
+
+ uint64_t error_code = 1 << key.number;
+ if ((pac & pac_mask) == (ptr & pac_mask)) {
+ return original_ptr;
+ } else {
+ int error_lsb = GetTopPACBit(ptr, type) - 2;
+ uint64_t error_mask = UINT64_C(0x3) << error_lsb;
+ return (original_ptr & ~error_mask) | (error_code << error_lsb);
+ }
+}
+
+uint64_t Simulator::AddPAC(uint64_t ptr, uint64_t context, PACKey key,
+ PointerType type) {
+ int top_pac_bit = GetTopPACBit(ptr, type);
+
+ DCHECK(HasTBI(ptr, type));
+ int ttbr = (ptr >> 55) & 1;
+ uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
+ uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+
+ uint64_t pac = ComputePAC(ext_ptr, context, key);
+
+ // If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
+ // the resulting code.
+ if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
+ ((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
+ pac ^= UINT64_C(1) << (top_pac_bit - 1);
+ }
+
+ uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
+ return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
+}
+
+uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
+ uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
+ return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 8618dd8551..71fedd5b2f 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -390,14 +390,14 @@ using SimulatorRuntimeCall_ReturnPtr = int64_t (*)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5,
int64_t arg6, int64_t arg7,
- int64_t arg8);
+ int64_t arg8, int64_t arg9);
#endif
using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5,
int64_t arg6, int64_t arg7,
- int64_t arg8);
+ int64_t arg8, int64_t arg9);
using SimulatorRuntimeCompareCall = int64_t (*)(double arg1, double arg2);
using SimulatorRuntimeFPFPCall = double (*)(double arg1, double arg2);
@@ -445,7 +445,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
const int64_t arg6 = xreg(6);
const int64_t arg7 = xreg(7);
const int64_t arg8 = stack_pointer[0];
- STATIC_ASSERT(kMaxCParameters == 9);
+ const int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
switch (redirection->type()) {
default:
@@ -477,14 +478,14 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64 ", 0x%016" PRIx64
", "
- "0x%016" PRIx64,
- arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
SimulatorRuntimeCall_ReturnPtr target =
reinterpret_cast<SimulatorRuntimeCall_ReturnPtr>(external);
int64_t result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
TraceSim("Returned: 0x%16\n", result);
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -512,12 +513,12 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64 ", 0x%016" PRIx64
", "
- "0x%016" PRIx64,
- arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
ObjectPair result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
reinterpret_cast<void*>(result.y));
#ifdef DEBUG
@@ -3037,11 +3038,31 @@ bool Simulator::FPProcessNaNs(Instruction* instr) {
return done;
}
+// clang-format off
+#define PAUTH_SYSTEM_MODES(V) \
+ V(A1716, 17, xreg(16), kPACKeyIA) \
+ V(ASP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIA)
+// clang-format on
+
void Simulator::VisitSystem(Instruction* instr) {
// Some system instructions hijack their Op and Cp fields to represent a
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
- if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
+ switch (instr->Mask(SystemPAuthMask)) {
+#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \
+ case PACI##SUFFIX: \
+ set_xreg(DST, AddPAC(xreg(DST), MOD, KEY, kInstructionPointer)); \
+ break; \
+ case AUTI##SUFFIX: \
+ set_xreg(DST, AuthPAC(xreg(DST), MOD, KEY, kInstructionPointer)); \
+ break;
+
+ PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS)
+#undef DEFINE_PAUTH_FUNCS
+#undef PAUTH_SYSTEM_MODES
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
switch (instr->Mask(SystemSysRegMask)) {
case MRS: {
switch (instr->ImmSystemRegister()) {
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index ca1cef61ae..4a493ec696 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -1273,6 +1273,45 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
static inline const char* VRegNameForCode(unsigned code);
static inline int CodeFromName(const char* name);
+ enum PointerType { kDataPointer, kInstructionPointer };
+
+ struct PACKey {
+ uint64_t high;
+ uint64_t low;
+ int number;
+ };
+
+ static const PACKey kPACKeyIA;
+
+ // Current implementation is that all pointers are tagged.
+ static bool HasTBI(uint64_t ptr, PointerType type) {
+ USE(ptr, type);
+ return true;
+ }
+
+ // Current implementation uses 48-bit virtual addresses.
+ static int GetBottomPACBit(uint64_t ptr, int ttbr) {
+ USE(ptr, ttbr);
+ DCHECK((ttbr == 0) || (ttbr == 1));
+ return 48;
+ }
+
+ // The top PAC bit is 55 for the purposes of relative bit fields with TBI,
+ // however bit 55 is the TTBR bit regardless of TBI so isn't part of the PAC
+ // codes in pointers.
+ static int GetTopPACBit(uint64_t ptr, PointerType type) {
+ return HasTBI(ptr, type) ? 55 : 63;
+ }
+
+ // Armv8.3 Pointer authentication helpers.
+ static uint64_t CalculatePACMask(uint64_t ptr, PointerType type, int ext_bit);
+ static uint64_t ComputePAC(uint64_t data, uint64_t context, PACKey key);
+ static uint64_t AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
+ PointerType type);
+ static uint64_t AddPAC(uint64_t ptr, uint64_t context, PACKey key,
+ PointerType type);
+ static uint64_t StripPAC(uint64_t ptr, PointerType type);
+
protected:
// Simulation helpers ------------------------------------
bool ConditionPassed(Condition cond) {
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 126cb9530e..3b334739da 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -8,6 +8,7 @@
#include <sstream>
#include "src/base/bits.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
@@ -270,6 +271,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
low_bound_(sp),
high_bound_(js_entry_sp),
top_frame_type_(StackFrame::NONE),
+ top_context_address_(kNullAddress),
external_callback_scope_(isolate->external_callback_scope()),
top_link_register_(lr) {
StackFrame::State state;
@@ -342,6 +344,13 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
if (type != StackFrame::INTERPRETED) {
advance_frame = true;
}
+ MSAN_MEMORY_IS_INITIALIZED(
+ fp + CommonFrameConstants::kContextOrFrameTypeOffset,
+ kSystemPointerSize);
+ Address type_or_context_address =
+ Memory<Address>(fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ if (!StackFrame::IsTypeMarker(type_or_context_address))
+ top_context_address_ = type_or_context_address;
} else {
// Mark the frame as OPTIMIZED if we cannot determine its type.
// We chose OPTIMIZED rather than INTERPRETED because it's closer to
@@ -579,6 +588,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return OPTIMIZED;
case Code::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
+ case Code::JS_TO_JS_FUNCTION:
+ return STUB;
case Code::C_WASM_ENTRY:
return C_WASM_ENTRY;
case Code::WASM_FUNCTION:
@@ -1136,11 +1147,11 @@ void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code code = LookupCode();
int offset = static_cast<int>(pc() - code.InstructionStart());
- AbstractCode abstract_code = AbstractCode::cast(code);
+ Handle<AbstractCode> abstract_code(AbstractCode::cast(code), isolate());
Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
- isolate(), receiver(), function(), abstract_code, offset, IsConstructor(),
- *params);
+ isolate(), receiver(), function(), *abstract_code, offset,
+ IsConstructor(), *params);
functions->push_back(summary);
}
@@ -1813,10 +1824,11 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- AbstractCode abstract_code = AbstractCode::cast(GetBytecodeArray());
+ Handle<AbstractCode> abstract_code(AbstractCode::cast(GetBytecodeArray()),
+ isolate());
Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
- isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
+ isolate(), receiver(), function(), *abstract_code, GetBytecodeOffset(),
IsConstructor(), *params);
functions->push_back(summary);
}
@@ -2258,5 +2270,161 @@ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
}
return entry;
}
+
+// Frame layout helper class implementation.
+// -------------------------------------------------------------------------
+
+namespace {
+
+int ArgumentPaddingSlots(int arg_count) {
+ return ShouldPadArguments(arg_count) ? 1 : 0;
+}
+
+// Some architectures need to push padding together with the TOS register
+// in order to maintain stack alignment.
+constexpr int TopOfStackRegisterPaddingSlots() { return kPadArguments ? 1 : 0; }
+
+bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ case BuiltinContinuationMode::JAVASCRIPT:
+ return false;
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
+ int translation_height,
+ bool is_topmost,
+ FrameInfoKind frame_info_kind) {
+ const int locals_count = translation_height;
+
+ register_stack_slot_count_ =
+ InterpreterFrameConstants::RegisterStackSlotCount(locals_count);
+
+ static constexpr int kTheAccumulator = 1;
+ static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+ int maybe_additional_slots =
+ (is_topmost || frame_info_kind == FrameInfoKind::kConservative)
+ ? (kTheAccumulator + kTopOfStackPadding)
+ : 0;
+ frame_size_in_bytes_without_fixed_ =
+ (register_stack_slot_count_ + maybe_additional_slots) *
+ kSystemPointerSize;
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by InterpreterFrameConstants. This will include
+ // argument padding, when needed.
+ const int parameter_padding_slots =
+ ArgumentPaddingSlots(parameters_count_with_receiver);
+ const int fixed_frame_size =
+ InterpreterFrameConstants::kFixedFrameSize +
+ (parameters_count_with_receiver + parameter_padding_slots) *
+ kSystemPointerSize;
+ frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ + fixed_frame_size;
+}
+
+ArgumentsAdaptorFrameInfo::ArgumentsAdaptorFrameInfo(int translation_height) {
+ // Note: This is according to the Translation's notion of 'parameters' which
+ // differs to that of the SharedFunctionInfo, e.g. by including the receiver.
+ const int parameters_count = translation_height;
+ frame_size_in_bytes_without_fixed_ =
+ (parameters_count + ArgumentPaddingSlots(parameters_count)) *
+ kSystemPointerSize;
+ frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ +
+ ArgumentsAdaptorFrameConstants::kFixedFrameSize;
+}
+
+ConstructStubFrameInfo::ConstructStubFrameInfo(int translation_height,
+ bool is_topmost,
+ FrameInfoKind frame_info_kind) {
+ // Note: This is according to the Translation's notion of 'parameters' which
+ // differs to that of the SharedFunctionInfo, e.g. by including the receiver.
+ const int parameters_count = translation_height;
+
+ // If the construct frame appears to be topmost we should ensure that the
+ // value of result register is preserved during continuation execution.
+ // We do this here by "pushing" the result of the constructor function to
+ // the top of the reconstructed stack and popping it in
+ // {Builtins::kNotifyDeoptimized}.
+
+ static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+ static constexpr int kTheResult = 1;
+ const int argument_padding = ArgumentPaddingSlots(parameters_count);
+
+ const int adjusted_height =
+ (is_topmost || frame_info_kind == FrameInfoKind::kConservative)
+ ? parameters_count + argument_padding + kTheResult +
+ kTopOfStackPadding
+ : parameters_count + argument_padding;
+ frame_size_in_bytes_without_fixed_ = adjusted_height * kSystemPointerSize;
+ frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ +
+ ConstructFrameConstants::kFixedFrameSize;
+}
+
+BuiltinContinuationFrameInfo::BuiltinContinuationFrameInfo(
+ int translation_height,
+ const CallInterfaceDescriptor& continuation_descriptor,
+ const RegisterConfiguration* register_config, bool is_topmost,
+ DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode,
+ FrameInfoKind frame_info_kind) {
+ const bool is_conservative = frame_info_kind == FrameInfoKind::kConservative;
+
+ // Note: This is according to the Translation's notion of 'parameters' which
+ // differs to that of the SharedFunctionInfo, e.g. by including the receiver.
+ const int parameters_count = translation_height;
+ frame_has_result_stack_slot_ =
+ !is_topmost || deopt_kind == DeoptimizeKind::kLazy;
+ const int result_slot_count =
+ (frame_has_result_stack_slot_ || is_conservative) ? 1 : 0;
+
+ const int exception_slot_count =
+ (BuiltinContinuationModeIsWithCatch(continuation_mode) || is_conservative)
+ ? 1
+ : 0;
+
+ const int allocatable_register_count =
+ register_config->num_allocatable_general_registers();
+ const int padding_slot_count =
+ BuiltinContinuationFrameConstants::PaddingSlotCount(
+ allocatable_register_count);
+
+ const int register_parameter_count =
+ continuation_descriptor.GetRegisterParameterCount();
+ translated_stack_parameter_count_ =
+ parameters_count - register_parameter_count;
+ stack_parameter_count_ = translated_stack_parameter_count_ +
+ result_slot_count + exception_slot_count;
+ const int stack_param_pad_count =
+ ArgumentPaddingSlots(stack_parameter_count_);
+
+ // If the builtins frame appears to be topmost we should ensure that the
+ // value of result register is preserved during continuation execution.
+ // We do this here by "pushing" the result of callback function to the
+ // top of the reconstructed stack and popping it in
+ // {Builtins::kNotifyDeoptimized}.
+ static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
+ static constexpr int kTheResult = 1;
+ const int push_result_count =
+ (is_topmost || is_conservative) ? kTheResult + kTopOfStackPadding : 0;
+
+ frame_size_in_bytes_ =
+ kSystemPointerSize * (stack_parameter_count_ + stack_param_pad_count +
+ allocatable_register_count + padding_slot_count +
+ push_result_count) +
+ BuiltinContinuationFrameConstants::kFixedFrameSize;
+
+ frame_size_in_bytes_above_fp_ =
+ kSystemPointerSize * (allocatable_register_count + padding_slot_count +
+ push_result_count) +
+ (BuiltinContinuationFrameConstants::kFixedFrameSize -
+ BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 1f83984f97..d1e7a7890d 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -1285,6 +1285,7 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
void Advance();
StackFrame::Type top_frame_type() const { return top_frame_type_; }
+ Address top_context_address() const { return top_context_address_; }
private:
void AdvanceOneFrame();
@@ -1308,9 +1309,178 @@ class SafeStackFrameIterator : public StackFrameIteratorBase {
const Address low_bound_;
const Address high_bound_;
StackFrame::Type top_frame_type_;
+ Address top_context_address_;
ExternalCallbackScope* external_callback_scope_;
Address top_link_register_;
};
+
+// Frame layout helper classes. Used by the deoptimizer and instruction
+// selector.
+// -------------------------------------------------------------------------
+
+// How to calculate the frame layout information. Precise, when all information
+// is available during deoptimization. Conservative, when an overapproximation
+// is fine.
+// TODO(jgruber): Investigate whether the conservative kind can be removed. It
+// seems possible: 1. is_topmost should be known through the outer_state chain
+// of FrameStateDescriptor; 2. the deopt_kind may be a property of the bailout
+// id; 3. for continuation_mode, we only care whether it is a mode with catch,
+// and that is likewise known at compile-time.
+// There is nothing specific blocking this, the investigation just requires time
+// and it is not that important to get the exact frame height at compile-time.
+enum class FrameInfoKind {
+ kPrecise,
+ kConservative,
+};
+
+// Used by the deoptimizer. Corresponds to frame kinds:
+enum class BuiltinContinuationMode {
+ STUB, // BuiltinContinuationFrame
+ JAVASCRIPT, // JavaScriptBuiltinContinuationFrame
+ JAVASCRIPT_WITH_CATCH, // JavaScriptBuiltinContinuationWithCatchFrame
+ JAVASCRIPT_HANDLE_EXCEPTION // JavaScriptBuiltinContinuationWithCatchFrame
+};
+
+class InterpretedFrameInfo {
+ public:
+ static InterpretedFrameInfo Precise(int parameters_count_with_receiver,
+ int translation_height, bool is_topmost) {
+ return {parameters_count_with_receiver, translation_height, is_topmost,
+ FrameInfoKind::kPrecise};
+ }
+
+ static InterpretedFrameInfo Conservative(int parameters_count_with_receiver,
+ int locals_count) {
+ return {parameters_count_with_receiver, locals_count, false,
+ FrameInfoKind::kConservative};
+ }
+
+ uint32_t register_stack_slot_count() const {
+ return register_stack_slot_count_;
+ }
+ uint32_t frame_size_in_bytes_without_fixed() const {
+ return frame_size_in_bytes_without_fixed_;
+ }
+ uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+ InterpretedFrameInfo(int parameters_count_with_receiver,
+ int translation_height, bool is_topmost,
+ FrameInfoKind frame_info_kind);
+
+ uint32_t register_stack_slot_count_;
+ uint32_t frame_size_in_bytes_without_fixed_;
+ uint32_t frame_size_in_bytes_;
+};
+
+class ArgumentsAdaptorFrameInfo {
+ public:
+ static ArgumentsAdaptorFrameInfo Precise(int translation_height) {
+ return ArgumentsAdaptorFrameInfo{translation_height};
+ }
+
+ static ArgumentsAdaptorFrameInfo Conservative(int parameters_count) {
+ return ArgumentsAdaptorFrameInfo{parameters_count};
+ }
+
+ uint32_t frame_size_in_bytes_without_fixed() const {
+ return frame_size_in_bytes_without_fixed_;
+ }
+ uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+ explicit ArgumentsAdaptorFrameInfo(int translation_height);
+
+ uint32_t frame_size_in_bytes_without_fixed_;
+ uint32_t frame_size_in_bytes_;
+};
+
+class ConstructStubFrameInfo {
+ public:
+ static ConstructStubFrameInfo Precise(int translation_height,
+ bool is_topmost) {
+ return {translation_height, is_topmost, FrameInfoKind::kPrecise};
+ }
+
+ static ConstructStubFrameInfo Conservative(int parameters_count) {
+ return {parameters_count, false, FrameInfoKind::kConservative};
+ }
+
+ uint32_t frame_size_in_bytes_without_fixed() const {
+ return frame_size_in_bytes_without_fixed_;
+ }
+ uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+
+ private:
+ ConstructStubFrameInfo(int translation_height, bool is_topmost,
+ FrameInfoKind frame_info_kind);
+
+ uint32_t frame_size_in_bytes_without_fixed_;
+ uint32_t frame_size_in_bytes_;
+};
+
+// Used by BuiltinContinuationFrameInfo.
+class CallInterfaceDescriptor;
+class RegisterConfiguration;
+
+class BuiltinContinuationFrameInfo {
+ public:
+ static BuiltinContinuationFrameInfo Precise(
+ int translation_height,
+ const CallInterfaceDescriptor& continuation_descriptor,
+ const RegisterConfiguration* register_config, bool is_topmost,
+ DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode) {
+ return {translation_height,
+ continuation_descriptor,
+ register_config,
+ is_topmost,
+ deopt_kind,
+ continuation_mode,
+ FrameInfoKind::kPrecise};
+ }
+
+ static BuiltinContinuationFrameInfo Conservative(
+ int parameters_count,
+ const CallInterfaceDescriptor& continuation_descriptor,
+ const RegisterConfiguration* register_config) {
+ // It doesn't matter what we pass as is_topmost, deopt_kind and
+ // continuation_mode; these values are ignored in conservative mode.
+ return {parameters_count,
+ continuation_descriptor,
+ register_config,
+ false,
+ DeoptimizeKind::kEager,
+ BuiltinContinuationMode::STUB,
+ FrameInfoKind::kConservative};
+ }
+
+ bool frame_has_result_stack_slot() const {
+ return frame_has_result_stack_slot_;
+ }
+ uint32_t translated_stack_parameter_count() const {
+ return translated_stack_parameter_count_;
+ }
+ uint32_t stack_parameter_count() const { return stack_parameter_count_; }
+ uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
+ uint32_t frame_size_in_bytes_above_fp() const {
+ return frame_size_in_bytes_above_fp_;
+ }
+
+ private:
+ BuiltinContinuationFrameInfo(
+ int translation_height,
+ const CallInterfaceDescriptor& continuation_descriptor,
+ const RegisterConfiguration* register_config, bool is_topmost,
+ DeoptimizeKind deopt_kind, BuiltinContinuationMode continuation_mode,
+ FrameInfoKind frame_info_kind);
+
+ bool frame_has_result_stack_slot_;
+ uint32_t translated_stack_parameter_count_;
+ uint32_t stack_parameter_count_;
+ uint32_t frame_size_in_bytes_;
+ uint32_t frame_size_in_bytes_above_fp_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 7482807921..8c3b54c2a7 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -11,6 +11,7 @@
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/numbers/conversions.h"
+#include "src/objects/bigint.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
@@ -80,10 +81,9 @@ void AtomicsWaitWakeHandle::Wake() {
enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };
-Object FutexEmulation::WaitJs(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
- int32_t value, double rel_timeout_ms) {
- Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+namespace {
+
+Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
if (res.IsSmi()) {
int val = Smi::ToInt(res);
switch (val) {
@@ -100,6 +100,22 @@ Object FutexEmulation::WaitJs(Isolate* isolate,
return res;
}
+} // namespace
+
+Object FutexEmulation::WaitJs32(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int32_t value, double rel_timeout_ms) {
+ Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+ return WaitJsTranslateReturn(isolate, res);
+}
+
+Object FutexEmulation::WaitJs64(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int64_t value, double rel_timeout_ms) {
+ Object res = Wait64(isolate, array_buffer, addr, value, rel_timeout_ms);
+ return WaitJsTranslateReturn(isolate, res);
+}
+
Object FutexEmulation::Wait32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index c6fee5c3f7..052b3c9c17 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -117,8 +117,12 @@ class FutexEmulation : public AllStatic {
// |rel_timeout_ms| can be Infinity.
// If woken, return "ok", otherwise return "timed-out". The initial check and
// the decision to wait happen atomically.
- static Object WaitJs(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int32_t value, double rel_timeout_ms);
+ static Object WaitJs32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value, double rel_timeout_ms);
+
+ // A version of WaitJs32 for int64_t values.
+ static Object WaitJs64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int64_t value, double rel_timeout_ms);
// Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
// out) as expected by Wasm.
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
index cf8611f8d6..7bf9821685 100644
--- a/deps/v8/src/execution/interrupts-scope.cc
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+InterruptsScope::InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
Mode mode)
: stack_guard_(isolate->stack_guard()),
intercept_mask_(intercept_mask),
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
index 3d74850a84..6419ee2d99 100644
--- a/deps/v8/src/execution/interrupts-scope.h
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -18,7 +18,7 @@ class InterruptsScope {
public:
enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
- V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+ V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
Mode mode);
virtual ~InterruptsScope() {
@@ -33,8 +33,8 @@ class InterruptsScope {
private:
StackGuard* stack_guard_;
- int intercept_mask_;
- int intercepted_flags_;
+ intptr_t intercept_mask_;
+ intptr_t intercepted_flags_;
Mode mode_;
InterruptsScope* prev_;
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index adeb7f54d3..6eb23db2a2 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/constants-arch.h"
#include "src/codegen/external-reference-table.h"
+#include "src/execution/stack-guard.h"
#include "src/execution/thread-local-top.h"
#include "src/roots/roots.h"
#include "src/utils/utils.h"
@@ -27,7 +28,7 @@ class Isolate;
// register.
class IsolateData final {
public:
- IsolateData() = default;
+ explicit IsolateData(Isolate* isolate) : stack_guard_(isolate) {}
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
@@ -81,6 +82,7 @@ class IsolateData final {
// The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+ StackGuard* stack_guard() { return &stack_guard_; }
uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
@@ -109,20 +111,27 @@ class IsolateData final {
Address* builtins() { return builtins_; }
private:
-// Static layout definition.
+ // Static layout definition.
+ //
+ // Note: The location of fields within IsolateData is significant. The
+ // closer they are to the value of kRootRegister (i.e.: isolate_root()), the
+ // cheaper it is to access them. See also: https://crbug.com/993264.
+ // The recommended guideline is to put frequently-accessed fields close to the
+ // beginning of IsolateData.
#define FIELDS(V) \
V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
V(kExternalMemoryOffset, kInt64Size) \
V(kExternalMemoryLlimitOffset, kInt64Size) \
V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size) \
+ V(kFastCCallCallerFPOffset, kSystemPointerSize) \
+ V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+ V(kStackGuardOffset, StackGuard::kSizeInBytes) \
V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes) \
V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
- V(kFastCCallCallerFPOffset, kSystemPointerSize) \
- V(kFastCCallCallerPCOffset, kSystemPointerSize) \
V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
@@ -150,6 +159,17 @@ class IsolateData final {
// Caches the amount of external memory registered at the last MC.
int64_t external_memory_at_last_mark_compact_ = 0;
+ // Stores the state of the caller for TurboAssembler::CallCFunction so that
+ // the sampling CPU profiler can iterate the stack during such calls. These
+ // are stored on IsolateData so that they can be stored to with only one move
+ // instruction in compiled code.
+ Address fast_c_call_caller_fp_ = kNullAddress;
+ Address fast_c_call_caller_pc_ = kNullAddress;
+
+ // Fields related to the system and JS stack. In particular, this contains the
+ // stack limit used by stack checks in generated code.
+ StackGuard stack_guard_;
+
RootsTable roots_;
ExternalReferenceTable external_reference_table_;
@@ -169,12 +189,6 @@ class IsolateData final {
// ia32 (otherwise the arguments adaptor call runs out of registers).
void* virtual_call_target_register_ = nullptr;
- // Stores the state of the caller for TurboAssembler::CallCFunction so that
- // the sampling CPU profiler can iterate the stack during such calls. These
- // are stored on IsolateData so that they can be stored to with only one move
- // instruction in compiled code.
- Address fast_c_call_caller_fp_ = kNullAddress;
- Address fast_c_call_caller_pc_ = kNullAddress;
// Whether the SafeStackFrameIterator can successfully iterate the current
// stack. Only valid values are 0 or 1.
uint8_t stack_is_iterable_ = 1;
@@ -225,6 +239,7 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
+ STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 7e037fb410..e1b021b921 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -119,25 +119,6 @@ bool Isolate::IsArrayConstructorIntact() {
return array_constructor_cell.value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsArraySpeciesLookupChainIntact() {
- // Note: It would be nice to have debug checks to make sure that the
- // species protector is accurate, but this would be hard to do for most of
- // what the protector stands for:
- // - You'd need to traverse the heap to check that no Array instance has
- // a constructor property
- // - To check that Array[Symbol.species] == Array, JS code has to execute,
- // but JS cannot be invoked in callstack overflow situations
- // All that could be checked reliably is that
- // Array.prototype.constructor == Array. Given that limitation, no check is
- // done here. In place, there are mjsunit tests harmony/array-species* which
- // ensure that behavior is correct in various invalid protector cases.
-
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kArraySpeciesProtector));
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
@@ -145,14 +126,6 @@ bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
-bool Isolate::IsRegExpSpeciesLookupChainIntact(
- Handle<NativeContext> native_context) {
- DCHECK_EQ(*native_context, this->raw_native_context());
- PropertyCell species_cell = native_context->regexp_species_protector();
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
bool Isolate::IsPromiseSpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 2b3551cdfb..d090ed5260 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -56,6 +56,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/prototype.h"
@@ -85,9 +86,9 @@
#include "unicode/uobject.h"
#endif // V8_INTL_SUPPORT
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
-#endif
+#endif // V8_OS_WIN64
extern "C" const uint8_t* v8_Default_embedded_blob_;
extern "C" uint32_t v8_Default_embedded_blob_size_;
@@ -2022,7 +2023,7 @@ void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK_EQ(scheduled_exception(),
ReadOnlyRoots(heap()).termination_exception());
// Clear termination once we returned from all V8 frames.
- if (handle_scope_implementer()->CallDepthIsZero()) {
+ if (thread_local_top()->CallDepthIsZero()) {
thread_local_top()->external_caught_exception_ = false;
clear_scheduled_exception();
}
@@ -2648,21 +2649,12 @@ Handle<Context> Isolate::GetIncumbentContext() {
char* Isolate::ArchiveThread(char* to) {
MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
sizeof(ThreadLocalTop));
- InitializeThreadLocal();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
return to + sizeof(ThreadLocalTop);
}
char* Isolate::RestoreThread(char* from) {
MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
sizeof(ThreadLocalTop));
-// This might be just paranoia, but it seems to be needed in case a
-// thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
DCHECK(context().is_null() || context().IsContext());
return from + sizeof(ThreadLocalTop);
}
@@ -2884,9 +2876,9 @@ v8::PageAllocator* Isolate::page_allocator() {
}
Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
- : isolate_allocator_(std::move(isolate_allocator)),
+ : isolate_data_(this),
+ isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
- stack_guard_(this),
allocator_(FLAG_trace_zone_stats
? new VerboseAccountingAllocator(&heap_, 256 * KB)
: new AccountingAllocator()),
@@ -2925,6 +2917,14 @@ void Isolate::CheckIsolateLayout() {
CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_fp_)),
+ Internals::kIsolateFastCCallCallerFpOffset);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
+ Internals::kIsolateFastCCallCallerPcOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
+ Internals::kIsolateStackGuardOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
Internals::kIsolateRootsOffset);
CHECK_EQ(Internals::kExternalMemoryOffset % 8, 0);
@@ -2961,7 +2961,7 @@ void Isolate::Deinit() {
heap_profiler()->StopSamplingHeapProfiler();
}
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
heap()->memory_allocator()) {
const base::AddressRegion& code_range =
@@ -2969,7 +2969,7 @@ void Isolate::Deinit() {
void* start = reinterpret_cast<void*>(code_range.begin());
win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
}
-#endif
+#endif // V8_OS_WIN64
debug()->Unload();
@@ -3139,7 +3139,12 @@ Isolate::~Isolate() {
default_microtask_queue_ = nullptr;
}
-void Isolate::InitializeThreadLocal() { thread_local_top()->Initialize(this); }
+void Isolate::InitializeThreadLocal() {
+ thread_local_top()->Initialize(this);
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
+}
void Isolate::SetTerminationOnExternalTryCatch() {
if (try_catch_handler() == nullptr) return;
@@ -3308,19 +3313,31 @@ bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
return Init(read_only_deserializer, startup_deserializer);
}
-static void AddCrashKeysForIsolateAndHeapPointers(Isolate* isolate) {
- v8::Platform* platform = V8::GetCurrentPlatform();
+static std::string AddressToString(uintptr_t address) {
+ std::stringstream stream_address;
+ stream_address << "0x" << std::hex << address;
+ return stream_address.str();
+}
+
+void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
+ DCHECK_NOT_NULL(add_crash_key_callback_);
- const int id = isolate->id();
- platform->AddCrashKey(id, "isolate", reinterpret_cast<uintptr_t>(isolate));
+ const uintptr_t isolate_address = reinterpret_cast<uintptr_t>(this);
+ add_crash_key_callback_(v8::CrashKeyId::kIsolateAddress,
+ AddressToString(isolate_address));
- auto heap = isolate->heap();
- platform->AddCrashKey(id, "ro_space",
- reinterpret_cast<uintptr_t>(heap->read_only_space()->first_page()));
- platform->AddCrashKey(id, "map_space",
- reinterpret_cast<uintptr_t>(heap->map_space()->first_page()));
- platform->AddCrashKey(id, "code_space",
- reinterpret_cast<uintptr_t>(heap->code_space()->first_page()));
+ const uintptr_t ro_space_firstpage_address =
+ reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page());
+ add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
+ AddressToString(ro_space_firstpage_address));
+ const uintptr_t map_space_firstpage_address =
+ reinterpret_cast<uintptr_t>(heap()->map_space()->first_page());
+ add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
+ AddressToString(map_space_firstpage_address));
+ const uintptr_t code_space_firstpage_address =
+ reinterpret_cast<uintptr_t>(heap()->code_space()->first_page());
+ add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
+ AddressToString(code_space_firstpage_address));
}
bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
@@ -3343,9 +3360,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// The initialization process does not handle memory exhaustion.
AlwaysAllocateScope always_allocate(this);
- // Safe after setting Heap::isolate_, and initializing StackGuard
- heap_.SetStackLimits();
-
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
reinterpret_cast<Address>(hacker_name##_address());
@@ -3379,7 +3393,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// will ensure this too, but we don't have to use lockers if we are only
// using one thread.
ExecutionAccess lock(this);
- stack_guard_.InitThread(lock);
+ stack_guard()->InitThread(lock);
}
// SetUp the object heap.
@@ -3524,10 +3538,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
clear_pending_message();
clear_scheduled_exception();
- // Deserializing may put strange things in the root array's copy of the
- // stack guard.
- heap_.SetStackLimits();
-
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects)
Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
@@ -3553,7 +3563,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
sampling_flags);
}
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
const base::AddressRegion& code_range =
heap()->memory_allocator()->code_range();
@@ -3561,14 +3571,13 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
size_t size_in_bytes = code_range.size();
win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
}
-#endif
+#endif // V8_OS_WIN64
if (create_heap_objects && FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
- AddCrashKeysForIsolateAndHeapPointers(this);
return true;
}
@@ -3990,15 +3999,6 @@ void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(!IsArrayConstructorIntact());
}
-void Isolate::InvalidateArraySpeciesProtector() {
- DCHECK(factory()->array_species_protector()->value().IsSmi());
- DCHECK(IsArraySpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "array_species_protector", factory()->array_species_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArraySpeciesLookupChainIntact());
-}
-
void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
@@ -4009,19 +4009,6 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
-void Isolate::InvalidateRegExpSpeciesProtector(
- Handle<NativeContext> native_context) {
- DCHECK_EQ(*native_context, this->raw_native_context());
- DCHECK(native_context->regexp_species_protector().value().IsSmi());
- DCHECK(IsRegExpSpeciesLookupChainIntact(native_context));
- Handle<PropertyCell> species_cell(native_context->regexp_species_protector(),
- this);
- PropertyCell::SetValueWithInvalidation(
- this, "regexp_species_protector", species_cell,
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsRegExpSpeciesLookupChainIntact(native_context));
-}
-
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value().IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
@@ -4189,7 +4176,7 @@ Handle<Symbol> Isolate::SymbolFor(RootIndex dictionary_index,
PropertyDetails::Empty(), &entry);
switch (dictionary_index) {
case RootIndex::kPublicSymbolTable:
- symbol->set_is_public(true);
+ symbol->set_is_in_public_symbol_table(true);
heap()->set_public_symbol_table(*dictionary);
break;
case RootIndex::kApiSymbolTable:
@@ -4237,7 +4224,7 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
}
void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
- if (!handle_scope_implementer()->CallDepthIsZero()) return;
+ if (!thread_local_top()->CallDepthIsZero()) return;
bool run_microtasks =
microtask_queue && microtask_queue->size() &&
@@ -4246,12 +4233,6 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
if (run_microtasks) {
microtask_queue->RunMicrotasks(this);
- } else {
- // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
- // set is still open (whether to clear it after every microtask or once
- // during a microtask checkpoint). See also
- // https://github.com/tc39/proposal-weakrefs/issues/39 .
- heap()->ClearKeptObjects();
}
if (call_completed_callbacks_.empty()) return;
@@ -4330,6 +4311,23 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
return v8::Utils::OpenHandle(*promise);
}
+void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
+
+void Isolate::SetHostCleanupFinalizationGroupCallback(
+ HostCleanupFinalizationGroupCallback callback) {
+ host_cleanup_finalization_group_callback_ = callback;
+}
+
+void Isolate::RunHostCleanupFinalizationGroupCallback(
+ Handle<JSFinalizationGroup> fg) {
+ if (host_cleanup_finalization_group_callback_ != nullptr) {
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(handle(Context::cast(fg->native_context()), this));
+ host_cleanup_finalization_group_callback_(api_context,
+ v8::Utils::ToLocal(fg));
+ }
+}
+
void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback) {
host_import_module_dynamically_callback_ = callback;
@@ -4337,7 +4335,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module) {
- Handle<Object> host_meta(module->import_meta(), this);
+ Handle<HeapObject> host_meta(module->import_meta(), this);
if (host_meta->IsTheHole(this)) {
host_meta = factory()->NewJSObjectWithNullProto();
if (host_initialize_import_meta_object_callback_ != nullptr) {
@@ -4399,7 +4397,7 @@ void Isolate::PrepareBuiltinSourcePositionMap() {
}
}
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
void Isolate::SetBuiltinUnwindData(
int builtin_index,
const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) {
@@ -4407,7 +4405,7 @@ void Isolate::SetBuiltinUnwindData(
embedded_file_writer_->SetBuiltinUnwindData(builtin_index, unwinding_info);
}
}
-#endif
+#endif // V8_OS_WIN64
void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
prepare_stack_trace_callback_ = callback;
@@ -4417,6 +4415,13 @@ bool Isolate::HasPrepareStackTraceCallback() const {
return prepare_stack_trace_callback_ != nullptr;
}
+void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) {
+ add_crash_key_callback_ = callback;
+
+ // Log the initial set of data.
+ AddCrashKeysForIsolateAndHeapPointers();
+}
+
void Isolate::SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
void* data) {
atomics_wait_callback_ = callback;
@@ -4663,6 +4668,27 @@ void Isolate::SetIdle(bool is_idle) {
}
}
+void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
+ HandleScope scope(this);
+ std::vector<Handle<SharedFunctionInfo>> sfis;
+ {
+ DisallowHeapAllocation no_gc;
+ HeapObjectIterator iterator(heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ if (obj.IsSharedFunctionInfo()) {
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
+ if (sfi.HasBytecodeArray()) {
+ sfis.push_back(Handle<SharedFunctionInfo>(sfi, this));
+ }
+ }
+ }
+ }
+ for (auto sfi : sfis) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(this, sfi);
+ }
+}
+
#ifdef V8_INTL_SUPPORT
icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
return icu_object_cache_[cache_type].get();
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 2ead7bf844..4eadb42438 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -404,6 +404,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
+ V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -902,7 +903,7 @@ class Isolate final : private HiddenFactory {
DCHECK_NOT_NULL(logger_);
return logger_;
}
- StackGuard* stack_guard() { return &stack_guard_; }
+ StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
Heap* heap() { return &heap_; }
ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
static Isolate* FromHeap(Heap* heap) {
@@ -959,6 +960,7 @@ class Isolate final : private HiddenFactory {
void set_deoptimizer_lazy_throw(bool value) {
deoptimizer_lazy_throw_ = value;
}
+ void InitializeThreadLocal();
ThreadLocalTop* thread_local_top() {
return &isolate_data_.thread_local_top_;
}
@@ -1174,10 +1176,7 @@ class Isolate final : private HiddenFactory {
bool IsArrayOrObjectOrStringPrototype(Object object);
- inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
- inline bool IsRegExpSpeciesLookupChainIntact(
- Handle<NativeContext> native_context);
// Check that the @@species protector is intact, which guards the lookup of
// "constructor" on JSPromise instances, whose [[Prototype]] is the initial
@@ -1264,7 +1263,6 @@ class Isolate final : private HiddenFactory {
void TraceProtectorInvalidation(const char* protector_name);
void InvalidateArrayConstructorProtector();
- void InvalidateArraySpeciesProtector();
void InvalidateTypedArraySpeciesProtector();
void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
void InvalidatePromiseSpeciesProtector();
@@ -1473,6 +1471,11 @@ class Isolate final : private HiddenFactory {
bool IsInAnyContext(Object object, uint32_t index);
+ void ClearKeptObjects();
+ void SetHostCleanupFinalizationGroupCallback(
+ HostCleanupFinalizationGroupCallback callback);
+ void RunHostCleanupFinalizationGroupCallback(Handle<JSFinalizationGroup> fg);
+
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
V8_EXPORT_PRIVATE MaybeHandle<JSPromise>
@@ -1497,11 +1500,11 @@ class Isolate final : private HiddenFactory {
// annotate the builtin blob with debugging information.
void PrepareBuiltinSourcePositionMap();
-#if defined(V8_OS_WIN_X64)
+#if defined(V8_OS_WIN64)
void SetBuiltinUnwindData(
int builtin_index,
const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info);
-#endif
+#endif // V8_OS_WIN64
void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
@@ -1509,6 +1512,8 @@ class Isolate final : private HiddenFactory {
Handle<JSArray> sites);
bool HasPrepareStackTraceCallback() const;
+ void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
+
void SetRAILMode(RAILMode rail_mode);
RAILMode rail_mode() { return rail_mode_.load(); }
@@ -1558,6 +1563,11 @@ class Isolate final : private HiddenFactory {
V8_EXPORT_PRIVATE void SetIdle(bool is_idle);
+ // Changing various modes can cause differences in generated bytecode which
+ // interferes with lazy source positions, so this should be called immediately
+ // before such a mode change to ensure that this cannot happen.
+ V8_EXPORT_PRIVATE void CollectSourcePositionsForAllBytecodeArrays();
+
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
@@ -1622,8 +1632,6 @@ class Isolate final : private HiddenFactory {
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- void InitializeThreadLocal();
-
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
@@ -1653,6 +1661,8 @@ class Isolate final : private HiddenFactory {
return "";
}
+ void AddCrashKeysForIsolateAndHeapPointers();
+
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
@@ -1673,7 +1683,6 @@ class Isolate final : private HiddenFactory {
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
Logger* logger_ = nullptr;
- StackGuard stack_guard_;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
DeoptimizerData* deoptimizer_data_ = nullptr;
@@ -1710,6 +1719,8 @@ class Isolate final : private HiddenFactory {
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
+ HostCleanupFinalizationGroupCallback
+ host_cleanup_finalization_group_callback_ = nullptr;
HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
nullptr;
HostInitializeImportMetaObjectCallback
@@ -1770,6 +1781,8 @@ class Isolate final : private HiddenFactory {
interpreter::Interpreter* interpreter_ = nullptr;
compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
+ // The following zone is for compiler-related objects that should live
+ // through all compilations (and thus all JSHeapBroker instances).
Zone* compiler_zone_ = nullptr;
CompilerDispatcher* compiler_dispatcher_ = nullptr;
@@ -1877,6 +1890,11 @@ class Isolate final : private HiddenFactory {
base::Mutex thread_data_table_mutex_;
ThreadDataTable thread_data_table_;
+ // Enables the host application to provide a mechanism for recording a
+ // predefined set of data as crash keys to be used in postmortem debugging
+ // in case of a crash.
+ AddCrashKeyCallback add_crash_key_callback_ = nullptr;
+
// Delete new/delete operators to ensure that Isolate::New() and
// Isolate::Delete() are used for Isolate creation and deletion.
void* operator new(size_t, void* ptr) { return ptr; }
@@ -1930,6 +1948,14 @@ class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
SaveAndSwitchContext(Isolate* isolate, Context new_context);
};
+// A scope which sets the given isolate's context to null for its lifetime to
+// ensure that code does not make assumptions on a context being available.
+class NullContextScope : public SaveAndSwitchContext {
+ public:
+ explicit NullContextScope(Isolate* isolate)
+ : SaveAndSwitchContext(isolate, Context()) {}
+};
+
class AssertNoContextChange {
#ifdef DEBUG
public:
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index d216d3bc39..63d1e2be1f 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -7,8 +7,11 @@
#include <memory>
#include "src/api/api-inl.h"
+#include "src/ast/ast.h"
+#include "src/ast/prettyprinter.h"
#include "src/base/v8-fallthrough.h"
#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
@@ -18,6 +21,9 @@
#include "src/objects/keys.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parsing.h"
+#include "src/roots/roots.h"
#include "src/strings/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -314,6 +320,10 @@ Handle<Object> StackFrameBase::GetWasmModuleName() {
return isolate_->factory()->undefined_value();
}
+Handle<Object> StackFrameBase::GetWasmInstance() {
+ return isolate_->factory()->undefined_value();
+}
+
int StackFrameBase::GetScriptId() const {
if (!HasScript()) return kNone;
return GetScript()->id();
@@ -332,6 +342,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
function_ = handle(array->Function(frame_ix), isolate);
code_ = handle(array->Code(frame_ix), isolate);
offset_ = array->Offset(frame_ix).value();
+ cached_position_ = base::nullopt;
const int flags = array->Flags(frame_ix).value();
is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
@@ -348,6 +359,7 @@ JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
function_(function),
code_(code),
offset_(offset),
+ cached_position_(base::nullopt),
is_async_(false),
is_constructor_(false),
is_strict_(false) {}
@@ -512,9 +524,12 @@ bool JSStackFrame::IsToplevel() {
}
int JSStackFrame::GetPosition() const {
+ if (cached_position_) return *cached_position_;
+
Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
- return code_->SourcePosition(offset_);
+ cached_position_ = code_->SourcePosition(offset_);
+ return *cached_position_;
}
bool JSStackFrame::HasScript() const {
@@ -575,6 +590,8 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
return module_name;
}
+Handle<Object> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
+
int WasmStackFrame::GetPosition() const {
return IsInterpreted()
? offset_
@@ -1155,5 +1172,232 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
no_caller, StackTraceCollection::kDetailed);
}
+namespace {
+
+bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
+ JavaScriptFrameIterator it(isolate);
+ if (!it.done()) {
+ // Compute the location from the function and the relocation info of the
+ // baseline code. For optimized code this will use the deoptimization
+ // information to get canonical location information.
+ std::vector<FrameSummary> frames;
+ it.frame()->Summarize(&frames);
+ auto& summary = frames.back().AsJavaScript();
+ Handle<SharedFunctionInfo> shared(summary.function()->shared(), isolate);
+ Handle<Object> script(shared->script(), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
+ int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
+ if (script->IsScript() &&
+ !(Handle<Script>::cast(script)->source().IsUndefined(isolate))) {
+ Handle<Script> casted_script = Handle<Script>::cast(script);
+ *target = MessageLocation(casted_script, pos, pos + 1, shared);
+ return true;
+ }
+ }
+ return false;
+}
+
+Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
+ IncrementalStringBuilder builder(isolate);
+
+ builder.AppendString(Object::TypeOf(isolate, object));
+ if (object->IsString()) {
+ builder.AppendCString(" \"");
+ builder.AppendString(Handle<String>::cast(object));
+ builder.AppendCString("\"");
+ } else if (object->IsNull(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->null_string());
+ } else if (object->IsTrue(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->true_string());
+ } else if (object->IsFalse(isolate)) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->false_string());
+ } else if (object->IsNumber()) {
+ builder.AppendCString(" ");
+ builder.AppendString(isolate->factory()->NumberToString(object));
+ }
+
+ return builder.Finish().ToHandleChecked();
+}
+
+Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
+ MessageLocation* location,
+ CallPrinter::ErrorHint* hint) {
+ if (ComputeLocation(isolate, location)) {
+ ParseInfo info(isolate, location->shared());
+ if (parsing::ParseAny(&info, location->shared(), isolate)) {
+ info.ast_value_factory()->Internalize(isolate);
+ CallPrinter printer(isolate, location->shared()->IsUserJavaScript());
+ Handle<String> str = printer.Print(info.literal(), location->start_pos());
+ *hint = printer.GetErrorHint();
+ if (str->length() > 0) return str;
+ } else {
+ isolate->clear_pending_exception();
+ }
+ }
+ return BuildDefaultCallSite(isolate, object);
+}
+
+MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
+ MessageTemplate default_id) {
+ switch (hint) {
+ case CallPrinter::ErrorHint::kNormalIterator:
+ return MessageTemplate::kNotIterable;
+
+ case CallPrinter::ErrorHint::kCallAndNormalIterator:
+ return MessageTemplate::kNotCallableOrIterable;
+
+ case CallPrinter::ErrorHint::kAsyncIterator:
+ return MessageTemplate::kNotAsyncIterable;
+
+ case CallPrinter::ErrorHint::kCallAndAsyncIterator:
+ return MessageTemplate::kNotCallableOrAsyncIterable;
+
+ case CallPrinter::ErrorHint::kNone:
+ return default_id;
+ }
+ return default_id;
+}
+
+} // namespace
+
+Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
+ Handle<Object> source) {
+ MessageLocation location;
+ CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+ MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
+
+ if (hint == CallPrinter::kNone) {
+ Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
+ return isolate->factory()->NewTypeError(id, callsite, iterator_symbol);
+ }
+
+ id = UpdateErrorTemplate(hint, id);
+ return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source) {
+ MessageLocation location;
+ CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+ MessageTemplate id = MessageTemplate::kCalledNonCallable;
+ id = UpdateErrorTemplate(hint, id);
+ return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Handle<Object> ErrorUtils::NewConstructedNonConstructable(
+ Isolate* isolate, Handle<Object> source) {
+ MessageLocation location;
+ CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
+ MessageTemplate id = MessageTemplate::kNotConstructor;
+ return isolate->factory()->NewTypeError(id, callsite);
+}
+
+Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
+ Handle<Object> object) {
+ return ThrowLoadFromNullOrUndefined(isolate, object, MaybeHandle<Object>());
+}
+Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
+ Handle<Object> object,
+ MaybeHandle<Object> key) {
+ DCHECK(object->IsNullOrUndefined());
+
+ MaybeHandle<String> maybe_property_name;
+
+ // Try to extract the property name from the given key, if any.
+ Handle<Object> key_handle;
+ if (key.ToHandle(&key_handle)) {
+ if (key_handle->IsString()) {
+ maybe_property_name = Handle<String>::cast(key_handle);
+ }
+ }
+
+ Handle<String> callsite;
+
+  // Inline the RenderCallSite logic here so that we can additionally access the
+ // destructuring property.
+ bool location_computed = false;
+ bool is_destructuring = false;
+ MessageLocation location;
+ if (ComputeLocation(isolate, &location)) {
+ location_computed = true;
+
+ ParseInfo info(isolate, location.shared());
+ if (parsing::ParseAny(&info, location.shared(), isolate)) {
+ info.ast_value_factory()->Internalize(isolate);
+ CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
+ Handle<String> str = printer.Print(info.literal(), location.start_pos());
+
+ int pos = -1;
+ is_destructuring = printer.destructuring_assignment() != nullptr;
+
+ if (is_destructuring) {
+ // If we don't have one yet, try to extract the property name from the
+ // destructuring property in the AST.
+ ObjectLiteralProperty* destructuring_prop =
+ printer.destructuring_prop();
+ if (maybe_property_name.is_null() && destructuring_prop != nullptr &&
+ destructuring_prop->key()->IsPropertyName()) {
+ maybe_property_name = destructuring_prop->key()
+ ->AsLiteral()
+ ->AsRawPropertyName()
+ ->string();
+ // Change the message location to point at the property name.
+ pos = destructuring_prop->key()->position();
+ }
+ if (maybe_property_name.is_null()) {
+ // Change the message location to point at the destructured value.
+ pos = printer.destructuring_assignment()->value()->position();
+ }
+
+ // If we updated the pos to a valid pos, rewrite the location.
+ if (pos != -1) {
+ location = MessageLocation(location.script(), pos, pos + 1,
+ location.shared());
+ }
+ }
+
+ if (str->length() > 0) callsite = str;
+ } else {
+ isolate->clear_pending_exception();
+ }
+ }
+
+ if (callsite.is_null()) {
+ callsite = BuildDefaultCallSite(isolate, object);
+ }
+
+ Handle<Object> error;
+ Handle<String> property_name;
+ if (is_destructuring) {
+ if (maybe_property_name.ToHandle(&property_name)) {
+ error = isolate->factory()->NewTypeError(
+ MessageTemplate::kNonCoercibleWithProperty, property_name, callsite,
+ object);
+ } else {
+ error = isolate->factory()->NewTypeError(MessageTemplate::kNonCoercible,
+ callsite, object);
+ }
+ } else {
+ Handle<Object> key_handle;
+ if (!key.ToHandle(&key_handle)) {
+ key_handle = ReadOnlyRoots(isolate).undefined_value_handle();
+ }
+ if (*key_handle == ReadOnlyRoots(isolate).iterator_symbol()) {
+ error = NewIteratorError(isolate, object);
+ } else {
+ error = isolate->factory()->NewTypeError(
+ MessageTemplate::kNonObjectPropertyLoad, key_handle, object);
+ }
+ }
+
+ return isolate->Throw(*error, location_computed ? &location : nullptr);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 23f32c2fe1..5da2d3a9eb 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,6 +12,7 @@
#include <memory>
+#include "src/base/optional.h"
#include "src/common/message-template.h"
#include "src/handles/handles.h"
@@ -72,6 +73,7 @@ class StackFrameBase {
virtual Handle<Object> GetTypeName() = 0;
virtual Handle<Object> GetEvalOrigin();
virtual Handle<Object> GetWasmModuleName();
+ virtual Handle<Object> GetWasmInstance();
// Returns the script ID if one is attached, -1 otherwise.
int GetScriptId() const;
@@ -146,6 +148,7 @@ class JSStackFrame : public StackFrameBase {
Handle<JSFunction> function_;
Handle<AbstractCode> code_;
int offset_;
+ mutable base::Optional<int> cached_position_;
bool is_async_ : 1;
bool is_constructor_ : 1;
@@ -168,12 +171,13 @@ class WasmStackFrame : public StackFrameBase {
Handle<Object> GetMethodName() override { return Null(); }
Handle<Object> GetTypeName() override { return Null(); }
Handle<Object> GetWasmModuleName() override;
+ Handle<Object> GetWasmInstance() override;
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
int GetColumnNumber() override;
- int GetPromiseIndex() const override { return kNone; }
+ int GetPromiseIndex() const override { return GetPosition(); }
bool IsNative() override { return false; }
bool IsToplevel() override { return false; }
@@ -279,6 +283,18 @@ class ErrorUtils : public AllStatic {
static MaybeHandle<Object> FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> stack_trace);
+
+ static Handle<Object> NewIteratorError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<Object> NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
+ Handle<Object> source);
+ static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
+ Handle<Object> object);
+ static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
+ Handle<Object> object,
+ MaybeHandle<Object> key);
};
class MessageFormatter {
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 3cc95205fa..ed76e9d79c 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -159,10 +159,13 @@ int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
isolate->handle_scope_implementer());
TRACE_EVENT_BEGIN0("v8.execute", "RunMicrotasks");
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
- maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
- processed_microtask_count =
- static_cast<int>(finished_microtask_count_ - base_count);
+ {
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
+ maybe_result = Execution::TryRunMicrotasks(isolate, this,
+ &maybe_exception);
+ processed_microtask_count =
+ static_cast<int>(finished_microtask_count_ - base_count);
+ }
TRACE_EVENT_END1("v8.execute", "RunMicrotasks", "microtask_count",
processed_microtask_count);
}
@@ -249,12 +252,6 @@ Microtask MicrotaskQueue::get(intptr_t index) const {
}
void MicrotaskQueue::OnCompleted(Isolate* isolate) {
- // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
- // set is still open (whether to clear it after every microtask or once
- // during a microtask checkpoint). See also
- // https://github.com/tc39/proposal-weakrefs/issues/39 .
- isolate->heap()->ClearKeptObjects();
-
FireMicrotasksCompletedCallback(isolate);
}
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index 6a3a160ec3..2d9a924c14 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -2152,7 +2152,7 @@ using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
int32_t arg2, int32_t arg3,
int32_t arg4, int32_t arg5,
int32_t arg6, int32_t arg7,
- int32_t arg8);
+ int32_t arg8, int32_t arg9);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -2194,7 +2194,8 @@ void Simulator::SoftwareInterrupt() {
int32_t arg6 = stack_pointer[6];
int32_t arg7 = stack_pointer[7];
int32_t arg8 = stack_pointer[8];
- STATIC_ASSERT(kMaxCParameters == 9);
+ int32_t arg9 = stack_pointer[9];
+ STATIC_ASSERT(kMaxCParameters == 10);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2378,12 +2379,12 @@ void Simulator::SoftwareInterrupt() {
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
+ "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
- arg3, arg4, arg5, arg6, arg7, arg8);
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
}
int64_t result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
set_register(v0, static_cast<int32_t>(result));
set_register(v1, static_cast<int32_t>(result >> 32));
}
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 3fbf1961a8..78dbc29a0b 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -2159,7 +2159,7 @@ using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5,
int64_t arg6, int64_t arg7,
- int64_t arg8);
+ int64_t arg8, int64_t arg9);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
@@ -2200,7 +2200,8 @@ void Simulator::SoftwareInterrupt() {
int64_t arg6 = get_register(a6);
int64_t arg7 = get_register(a7);
int64_t arg8 = stack_pointer[0];
- STATIC_ASSERT(kMaxCParameters == 9);
+ int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2372,12 +2373,12 @@ void Simulator::SoftwareInterrupt() {
"Call to host function at %p "
"args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
" , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
- " , %08" PRIx64 " \n",
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
- arg3, arg4, arg5, arg6, arg7, arg8);
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
}
ObjectPair result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
set_register(v0, (int64_t)(result.x));
set_register(v1, (int64_t)(result.y));
}
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 96308f7f5b..ab8786713b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -931,10 +931,12 @@ using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5,
intptr_t arg6, intptr_t arg7,
- intptr_t arg8);
+ intptr_t arg8, intptr_t arg9);
using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8, intptr_t arg9);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -964,7 +966,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromInstruction(instr);
- const int kArgCount = 9;
+ const int kArgCount = 10;
const int kRegisterArgCount = 8;
int arg0_regnum = 3;
intptr_t result_buffer = 0;
@@ -982,9 +984,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
// Remaining argument on stack
- arg[kRegisterArgCount] = stack_pointer[kStackFrameExtraParamSlot];
- STATIC_ASSERT(kArgCount == kRegisterArgCount + 1);
- STATIC_ASSERT(kMaxCParameters == 9);
+ for (int i = kRegisterArgCount, j = 0; i < kArgCount; i++, j++) {
+ arg[i] = stack_pointer[kStackFrameExtraParamSlot + j];
+ }
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 2);
+ STATIC_ASSERT(kMaxCParameters == kArgCount);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -1161,9 +1165,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR,
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
- arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
+ arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
get_register(sp));
@@ -1174,8 +1179,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
SimulatorRuntimePairCall target =
reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8], arg[9]);
intptr_t x;
intptr_t y;
decodeObjectPair(&result, &x, &y);
@@ -1195,7 +1200,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8]);
+ arg[5], arg[6], arg[7], arg[8], arg[9]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
diff --git a/deps/v8/src/execution/protectors-inl.h b/deps/v8/src/execution/protectors-inl.h
new file mode 100644
index 0000000000..b2428063e1
--- /dev/null
+++ b/deps/v8/src/execution/protectors-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_PROTECTORS_INL_H_
+#define V8_EXECUTION_PROTECTORS_INL_H_
+
+#include "src/execution/protectors.h"
+#include "src/objects/contexts-inl.h"
+#include "src/objects/property-cell-inl.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK(name, cell) \
+ bool Protectors::Is##name##Intact(Handle<NativeContext> native_context) { \
+ PropertyCell species_cell = native_context->cell(); \
+ return species_cell.value().IsSmi() && \
+ Smi::ToInt(species_cell.value()) == kProtectorValid; \
+ }
+DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK)
+
+#define DEFINE_PROTECTOR_ON_ISOLATE_CHECK(name, root_index, unused_cell) \
+ bool Protectors::Is##name##Intact(Isolate* isolate) { \
+ PropertyCell cell = \
+ PropertyCell::cast(isolate->root(RootIndex::k##root_index)); \
+ return cell.value().IsSmi() && \
+ Smi::ToInt(cell.value()) == kProtectorValid; \
+ }
+DECLARED_PROTECTORS_ON_ISOLATE(DEFINE_PROTECTOR_ON_ISOLATE_CHECK)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_PROTECTORS_INL_H_
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
new file mode 100644
index 0000000000..3ac07eede3
--- /dev/null
+++ b/deps/v8/src/execution/protectors.cc
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/protectors.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/execution/protectors-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/contexts.h"
+#include "src/objects/property-cell.h"
+#include "src/objects/smi.h"
+#include "src/tracing/trace-event.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+#define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell) \
+ void Protectors::Invalidate##name(Isolate* isolate, \
+ Handle<NativeContext> native_context) { \
+ DCHECK_EQ(*native_context, isolate->raw_native_context()); \
+ DCHECK(native_context->cell().value().IsSmi()); \
+ DCHECK(Is##name##Intact(native_context)); \
+ Handle<PropertyCell> species_cell(native_context->cell(), isolate); \
+ PropertyCell::SetValueWithInvalidation( \
+ isolate, #cell, species_cell, \
+ handle(Smi::FromInt(kProtectorInvalid), isolate)); \
+ DCHECK(!Is##name##Intact(native_context)); \
+ }
+DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
+ INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION)
+#undef INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION
+
+#define INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION(name, unused_index, cell) \
+ void Protectors::Invalidate##name(Isolate* isolate) { \
+ DCHECK(isolate->factory()->cell()->value().IsSmi()); \
+ DCHECK(Is##name##Intact(isolate)); \
+ PropertyCell::SetValueWithInvalidation( \
+ isolate, #cell, isolate->factory()->cell(), \
+ handle(Smi::FromInt(kProtectorInvalid), isolate)); \
+ DCHECK(!Is##name##Intact(isolate)); \
+ }
+DECLARED_PROTECTORS_ON_ISOLATE(INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION)
+#undef INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
new file mode 100644
index 0000000000..5c54613bb1
--- /dev/null
+++ b/deps/v8/src/execution/protectors.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_PROTECTORS_H_
+#define V8_EXECUTION_PROTECTORS_H_
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Protectors : public AllStatic {
+ public:
+ static const int kProtectorValid = 1;
+ static const int kProtectorInvalid = 0;
+
+#define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \
+ V(RegExpSpeciesLookupChainProtector, regexp_species_protector)
+
+#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
+ V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector)
+
+#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
+ static inline bool Is##name##Intact(Handle<NativeContext> native_context); \
+ static void Invalidate##name(Isolate* isolate, \
+ Handle<NativeContext> native_context);
+ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT)
+#undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT
+
+#define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \
+ static inline bool Is##name##Intact(Isolate* isolate); \
+ static void Invalidate##name(Isolate* isolate);
+
+ DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE)
+#undef DECLARE_PROTECTOR_ON_ISOLATE
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_PROTECTORS_H_
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 0ed36cbe10..65476e346f 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -8,6 +8,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/pending-optimization-table.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/handles/global-handles.h"
@@ -119,6 +120,17 @@ void RuntimeProfiler::MaybeOptimize(JSFunction function,
}
return;
}
+ if (FLAG_testing_d8_test_runner) {
+ if (!PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
+ function)) {
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[function ");
+ function.PrintName();
+ PrintF(" has been marked manually for optimization]\n");
+ }
+ return;
+ }
+ }
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 8a82e32243..985a941874 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -1858,10 +1858,12 @@ using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5,
intptr_t arg6, intptr_t arg7,
- intptr_t arg8);
+ intptr_t arg8, intptr_t arg9);
using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8, intptr_t arg9);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -1891,7 +1893,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromInstruction(instr);
- const int kArgCount = 9;
+ const int kArgCount = 10;
const int kRegisterArgCount = 5;
int arg0_regnum = 2;
intptr_t result_buffer = 0;
@@ -1913,8 +1915,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
arg[i] = stack_pointer[(kCalleeRegisterSaveAreaSize / kPointerSize) +
(i - kRegisterArgCount)];
}
- STATIC_ASSERT(kArgCount == kRegisterArgCount + 4);
- STATIC_ASSERT(kMaxCParameters == 9);
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 5);
+ STATIC_ASSERT(kMaxCParameters == kArgCount);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2094,9 +2096,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR,
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
- arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
+ arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
static_cast<intptr_t>(get_register(sp)));
@@ -2107,8 +2110,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
SimulatorRuntimePairCall target =
reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8], arg[9]);
intptr_t x;
intptr_t y;
decodeObjectPair(&result, &x, &y);
@@ -2128,7 +2131,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8]);
+ arg[5], arg[6], arg[7], arg[8], arg[9]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index e5c24cef1e..1cf4c4605a 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -21,14 +21,12 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(kInterruptLimit);
thread_local_.set_climit(kInterruptLimit);
- isolate_->heap()->SetStackLimits();
}
void StackGuard::reset_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(thread_local_.real_jslimit_);
thread_local_.set_climit(thread_local_.real_climit_);
- isolate_->heap()->SetStackLimits();
}
void StackGuard::SetStackLimit(uintptr_t limit) {
@@ -54,7 +52,6 @@ void StackGuard::AdjustStackLimitForSimulator() {
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
thread_local_.set_jslimit(jslimit);
- isolate_->heap()->SetStackLimits();
}
}
@@ -75,7 +72,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
// Intercept already requested interrupts.
- int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+ intptr_t intercepted =
+ thread_local_.interrupt_flags_ & scope->intercept_mask_;
scope->intercepted_flags_ = intercepted;
thread_local_.interrupt_flags_ &= ~intercepted;
} else {
@@ -124,7 +122,7 @@ void StackGuard::PopInterruptsScope() {
bool StackGuard::CheckInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & flag;
+ return (thread_local_.interrupt_flags_ & flag) != 0;
}
void StackGuard::RequestInterrupt(InterruptFlag flag) {
@@ -160,7 +158,7 @@ int StackGuard::FetchAndClearInterrupts() {
ExecutionAccess access(isolate_);
int result = 0;
- if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
+ if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
// The TERMINATE_EXECUTION interrupt is special, since it terminates
// execution but should leave V8 in a resumable state. If it exists, we only
// fetch and clear that bit. On resume, V8 can continue processing other
@@ -169,7 +167,7 @@ int StackGuard::FetchAndClearInterrupts() {
thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
if (!has_pending_interrupts(access)) reset_limits(access);
} else {
- result = thread_local_.interrupt_flags_;
+ result = static_cast<int>(thread_local_.interrupt_flags_);
thread_local_.interrupt_flags_ = 0;
reset_limits(access);
}
@@ -180,23 +178,13 @@ int StackGuard::FetchAndClearInterrupts() {
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
+ thread_local_ = {};
return to + sizeof(ThreadLocal);
}
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -206,39 +194,21 @@ void StackGuard::FreeThreadResources() {
per_thread->set_stack_limit(thread_local_.real_climit_);
}
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- set_jslimit(kIllegalLimit);
- real_climit_ = kIllegalLimit;
- set_climit(kIllegalLimit);
+void StackGuard::ThreadLocal::Initialize(Isolate* isolate,
+ const ExecutionAccess& lock) {
+ const uintptr_t kLimitSize = FLAG_stack_size * KB;
+ DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
+ uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+ set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
+ real_climit_ = limit;
+ set_climit(limit);
interrupt_scopes_ = nullptr;
interrupt_flags_ = 0;
}
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
- uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
- real_climit_ = limit;
- set_climit(limit);
- should_set_stack_limits = true;
- }
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+ thread_local_.Initialize(isolate_, lock);
Isolate::PerIsolateThreadData* per_thread =
isolate_->FindOrAllocatePerThreadDataForThisThread();
uintptr_t stored_limit = per_thread->stack_limit();
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index d7477f1623..febd1ecb0a 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -7,6 +7,7 @@
#include "include/v8-internal.h"
#include "src/base/atomicops.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -37,12 +38,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
char* RestoreStackGuard(char* from);
static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
+ // Sets up the default stack guard for this thread.
void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
#define INTERRUPT_LIST(V) \
V(TERMINATE_EXECUTION, TerminateExecution, 0) \
@@ -89,6 +86,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
// stack overflow, then handle the interruption accordingly.
Object HandleInterrupts();
+ static constexpr int kSizeInBytes = 7 * kSystemPointerSize;
+
private:
bool CheckInterrupt(InterruptFlag flag);
void RequestInterrupt(InterruptFlag flag);
@@ -124,13 +123,9 @@ class V8_EXPORT_PRIVATE StackGuard final {
class ThreadLocal final {
public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
+ ThreadLocal() {}
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize(Isolate* isolate);
+ void Initialize(Isolate* isolate, const ExecutionAccess& lock);
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
@@ -141,13 +136,16 @@ class V8_EXPORT_PRIVATE StackGuard final {
// break or preemption) in which case it is lowered to make stack checks
// fail. Both the generated code and the runtime system check against the
// one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
+
+ // Actual JavaScript stack limit set for the VM.
+ uintptr_t real_jslimit_ = kIllegalLimit;
+ // Actual C++ stack limit set for the VM.
+ uintptr_t real_climit_ = kIllegalLimit;
// jslimit_ and climit_ can be read without any lock.
// Writing requires the ExecutionAccess lock.
- base::AtomicWord jslimit_;
- base::AtomicWord climit_;
+ base::AtomicWord jslimit_ = kIllegalLimit;
+ base::AtomicWord climit_ = kIllegalLimit;
uintptr_t jslimit() {
return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
@@ -164,8 +162,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
static_cast<base::AtomicWord>(limit));
}
- InterruptsScope* interrupt_scopes_;
- int interrupt_flags_;
+ InterruptsScope* interrupt_scopes_ = nullptr;
+ intptr_t interrupt_flags_ = 0;
};
// TODO(isolates): Technically this could be calculated directly from a
@@ -180,6 +178,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
+STATIC_ASSERT(StackGuard::kSizeInBytes == sizeof(StackGuard));
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 569333f276..cb69fb56ef 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -26,5 +26,15 @@ void ThreadLocalTop::Free() {
while (promise_on_stack_) isolate_->PopPromise();
}
+#if defined(USE_SIMULATOR)
+void ThreadLocalTop::StoreCurrentStackPosition() {
+ last_api_entry_ = simulator_->get_sp();
+}
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+void ThreadLocalTop::StoreCurrentStackPosition() {
+ last_api_entry_ = reinterpret_cast<Address>(GetCurrentStackPosition());
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 625fcc41dd..57166299c5 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/execution/thread-id.h"
#include "src/objects/contexts.h"
+#include "src/utils/utils.h"
namespace v8 {
@@ -25,7 +26,7 @@ class ThreadLocalTop {
// TODO(all): This is not particularly beautiful. We should probably
// refactor this to really consist of just Addresses and 32-bit
// integer fields.
- static constexpr uint32_t kSizeInBytes = 23 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 24 * kSystemPointerSize;
// Does early low-level initialization that does not depend on the
// isolate being present.
@@ -56,6 +57,31 @@ class ThreadLocalTop {
v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
}
+ // Call depth represents nested v8 api calls. Instead of storing the nesting
+ // level as an integer, we store the stack height of the last API entry. This
+ // additional information is used when we decide whether to trigger a debug
+ // break at a function entry.
+ template <typename Scope>
+ void IncrementCallDepth(Scope* stack_allocated_scope) {
+ stack_allocated_scope->previous_stack_height_ = last_api_entry_;
+#if defined(USE_SIMULATOR) || defined(V8_USE_ADDRESS_SANITIZER)
+ StoreCurrentStackPosition();
+#else
+ last_api_entry_ = reinterpret_cast<i::Address>(stack_allocated_scope);
+#endif
+ }
+
+#if defined(USE_SIMULATOR) || defined(V8_USE_ADDRESS_SANITIZER)
+ void StoreCurrentStackPosition();
+#endif
+
+ template <typename Scope>
+ void DecrementCallDepth(Scope* stack_allocated_scope) {
+ last_api_entry_ = stack_allocated_scope->previous_stack_height_;
+ }
+
+ bool CallDepthIsZero() const { return last_api_entry_ == kNullAddress; }
+
void Free();
Isolate* isolate_ = nullptr;
@@ -77,6 +103,8 @@ class ThreadLocalTop {
Address pending_handler_fp_ = kNullAddress;
Address pending_handler_sp_ = kNullAddress;
+ Address last_api_entry_ = kNullAddress;
+
// Communication channel between Isolate::Throw and message consumers.
Object pending_message_obj_;
bool rethrowing_message_ = false;
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 6b99b81ef7..e16988b275 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -40,10 +40,6 @@ void Locker::Initialize(v8::Isolate* isolate) {
// get the saved state for this thread and restore it.
if (isolate_->thread_manager()->RestoreThread()) {
top_level_ = false;
- } else {
- internal::ExecutionAccess access(isolate_);
- isolate_->stack_guard()->ClearThread(access);
- isolate_->thread_manager()->InitThread(access);
}
}
DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
@@ -88,6 +84,7 @@ Unlocker::~Unlocker() {
namespace internal {
void ThreadManager::InitThread(const ExecutionAccess& lock) {
+ isolate_->InitializeThreadLocal();
isolate_->stack_guard()->InitThread(lock);
isolate_->debug()->InitThread(lock);
}