Diffstat (limited to 'deps/v8/src/execution')
-rw-r--r--  deps/v8/src/execution/OWNERS | 5
-rw-r--r--  deps/v8/src/execution/arm/frame-constants-arm.cc | 4
-rw-r--r--  deps/v8/src/execution/arm64/frame-constants-arm64.cc | 6
-rw-r--r--  deps/v8/src/execution/execution.cc | 350
-rw-r--r--  deps/v8/src/execution/execution.h | 177
-rw-r--r--  deps/v8/src/execution/frame-constants.h | 9
-rw-r--r--  deps/v8/src/execution/frames-inl.h | 33
-rw-r--r--  deps/v8/src/execution/frames.cc | 46
-rw-r--r--  deps/v8/src/execution/frames.h | 25
-rw-r--r--  deps/v8/src/execution/ia32/frame-constants-ia32.cc | 6
-rw-r--r--  deps/v8/src/execution/interrupts-scope.cc | 42
-rw-r--r--  deps/v8/src/execution/interrupts-scope.h | 72
-rw-r--r--  deps/v8/src/execution/isolate-data.h | 8
-rw-r--r--  deps/v8/src/execution/isolate-inl.h | 7
-rw-r--r--  deps/v8/src/execution/isolate-utils-inl.h | 64
-rw-r--r--  deps/v8/src/execution/isolate-utils.h | 31
-rw-r--r--  deps/v8/src/execution/isolate.cc | 231
-rw-r--r--  deps/v8/src/execution/isolate.h | 83
-rw-r--r--  deps/v8/src/execution/message-template.h | 591
-rw-r--r--  deps/v8/src/execution/messages.cc | 312
-rw-r--r--  deps/v8/src/execution/messages.h | 21
-rw-r--r--  deps/v8/src/execution/microtask-queue.cc | 2
-rw-r--r--  deps/v8/src/execution/mips/frame-constants-mips.cc | 7
-rw-r--r--  deps/v8/src/execution/mips/simulator-mips.cc | 372
-rw-r--r--  deps/v8/src/execution/mips/simulator-mips.h | 18
-rw-r--r--  deps/v8/src/execution/mips64/frame-constants-mips64.cc | 3
-rw-r--r--  deps/v8/src/execution/mips64/simulator-mips64.cc | 372
-rw-r--r--  deps/v8/src/execution/mips64/simulator-mips64.h | 20
-rw-r--r--  deps/v8/src/execution/ppc/simulator-ppc.cc | 2
-rw-r--r--  deps/v8/src/execution/s390/simulator-s390.cc | 90
-rw-r--r--  deps/v8/src/execution/stack-guard.cc | 345
-rw-r--r--  deps/v8/src/execution/stack-guard.h | 186
-rw-r--r--  deps/v8/src/execution/x64/frame-constants-x64.cc | 1
33 files changed, 1592 insertions(+), 1949 deletions(-)
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index a62d530e1a..75c1a1b30e 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,10 +1,13 @@
-binji@chromium.org
bmeurer@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
mstarzinger@chromium.org
+mythria@chromium.org
+delphick@chromium.org
petermarshall@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index af04813301..602242ac97 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -6,9 +6,9 @@
#include "src/execution/arm/frame-constants-arm.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index 89a5259e2b..607081a562 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -6,11 +6,11 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/execution/arm64/frame-constants-arm64.h"
+
#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
-
-#include "src/execution/arm64/frame-constants-arm64.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 285b4b2134..06c4e3a6cc 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -5,32 +5,15 @@
#include "src/execution/execution.h"
#include "src/api/api-inl.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/debug/debug.h"
+#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
-#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
-#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
-void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(kInterruptLimit);
- thread_local_.set_climit(kInterruptLimit);
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::reset_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(thread_local_.real_jslimit_);
- thread_local_.set_climit(thread_local_.real_climit_);
- isolate_->heap()->SetStackLimits();
-}
-
namespace {
Handle<Object> NormalizeReceiver(Isolate* isolate, Handle<Object> receiver) {
@@ -235,6 +218,22 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
return isolate->factory()->undefined_value();
}
+ if (params.execution_target == Execution::Target::kCallable) {
+ Handle<Context> context = isolate->native_context();
+ if (!context->script_execution_callback().IsUndefined(isolate)) {
+ v8::Context::AbortScriptExecutionCallback callback =
+ v8::ToCData<v8::Context::AbortScriptExecutionCallback>(
+ context->script_execution_callback());
+ v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ callback(api_isolate, api_context);
+ DCHECK(!isolate->has_scheduled_exception());
+ // Always throw an exception to abort execution, if callback exists.
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<Object>();
+ }
+ }
+
// Placeholder for return value.
Object value;
@@ -406,271 +405,68 @@ MaybeHandle<Object> Execution::TryRunMicrotasks(
exception_out));
}
-void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access(isolate_);
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- }
- if (thread_local_.climit() == thread_local_.real_climit_) {
- thread_local_.set_climit(limit);
- }
- thread_local_.real_climit_ = limit;
- thread_local_.real_jslimit_ = jslimit;
-}
-
-void StackGuard::AdjustStackLimitForSimulator() {
- ExecutionAccess access(isolate_);
- uintptr_t climit = thread_local_.real_climit_;
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- isolate_->heap()->SetStackLimits();
- }
-}
-
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-void StackGuard::DisableInterrupts() {
- ExecutionAccess access(isolate_);
- reset_limits(access);
-}
-
-void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
- ExecutionAccess access(isolate_);
- DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
- if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Intercept already requested interrupts.
- int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
- scope->intercepted_flags_ = intercepted;
- thread_local_.interrupt_flags_ &= ~intercepted;
- } else {
- DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
- // Restore postponed interrupts.
- int restored_flags = 0;
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
- current->intercepted_flags_ &= ~scope->intercept_mask_;
- }
- thread_local_.interrupt_flags_ |= restored_flags;
+struct StackHandlerMarker {
+ Address next;
+ Address padding;
+};
+STATIC_ASSERT(offsetof(StackHandlerMarker, next) ==
+ StackHandlerConstants::kNextOffset);
+STATIC_ASSERT(offsetof(StackHandlerMarker, padding) ==
+ StackHandlerConstants::kPaddingOffset);
+STATIC_ASSERT(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
+
+void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
+ Address wasm_call_target, Handle<Object> object_ref,
+ Address packed_args) {
+ using WasmEntryStub = GeneratedCode<Address(
+ Address target, Address object_ref, Address argv, Address c_entry_fp)>;
+ WasmEntryStub stub_entry =
+ WasmEntryStub::FromAddress(isolate, wrapper_code->InstructionStart());
+
+ // Save and restore context around invocation and block the
+ // allocation of handles without explicit handle scopes.
+ SaveContext save(isolate);
+ SealHandleScope shs(isolate);
+
+ Address saved_c_entry_fp = *isolate->c_entry_fp_address();
+ Address saved_js_entry_sp = *isolate->js_entry_sp_address();
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = GetCurrentStackPosition();
}
- if (!has_pending_interrupts(access)) reset_limits(access);
- // Add scope to the chain.
- scope->prev_ = thread_local_.interrupt_scopes_;
- thread_local_.interrupt_scopes_ = scope;
-}
+ StackHandlerMarker stack_handler;
+ stack_handler.next = isolate->thread_local_top()->handler_;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ stack_handler.padding = GetCurrentStackPosition();
+#else
+ stack_handler.padding = 0;
+#endif
+ isolate->thread_local_top()->handler_ =
+ reinterpret_cast<Address>(&stack_handler);
+ trap_handler::SetThreadInWasm();
-void StackGuard::PopInterruptsScope() {
- ExecutionAccess access(isolate_);
- InterruptsScope* top = thread_local_.interrupt_scopes_;
- DCHECK_NE(top->mode_, InterruptsScope::kNoop);
- if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Make intercepted interrupts active.
- DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
- thread_local_.interrupt_flags_ |= top->intercepted_flags_;
- } else {
- DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
- // Postpone existing interupts if needed.
- if (top->prev_) {
- for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
- interrupt = interrupt << 1) {
- InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
- if ((thread_local_.interrupt_flags_ & flag) &&
- top->prev_->Intercept(flag)) {
- thread_local_.interrupt_flags_ &= ~flag;
- }
- }
+ {
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCodeEntry == 0);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kObjectRef == 1);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kArgumentsBuffer == 2);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCEntryFp == 3);
+ Address result = stub_entry.Call(wasm_call_target, object_ref->ptr(),
+ packed_args, saved_c_entry_fp);
+ if (result != kNullAddress) {
+ isolate->set_pending_exception(Object(result));
}
}
- if (has_pending_interrupts(access)) set_interrupt_limits(access);
- // Remove scope from chain.
- thread_local_.interrupt_scopes_ = top->prev_;
-}
-
-bool StackGuard::CheckInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & flag;
-}
-void StackGuard::RequestInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Check the chain of InterruptsScope for interception.
- if (thread_local_.interrupt_scopes_ &&
- thread_local_.interrupt_scopes_->Intercept(flag)) {
- return;
+ // If there was an exception, then the thread-in-wasm flag is cleared
+ // already.
+ if (trap_handler::IsThreadInWasm()) {
+ trap_handler::ClearThreadInWasm();
}
-
- // Not intercepted. Set as active interrupt flag.
- thread_local_.interrupt_flags_ |= flag;
- set_interrupt_limits(access);
-
- // If this isolate is waiting in a futex, notify it to wake up.
- isolate_->futex_wait_list_node()->NotifyWake();
-}
-
-void StackGuard::ClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Clear the interrupt flag from the chain of InterruptsScope.
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- current->intercepted_flags_ &= ~flag;
+ isolate->thread_local_top()->handler_ = stack_handler.next;
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = saved_js_entry_sp;
}
-
- // Clear the interrupt flag from the active interrupt flags.
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
-}
-
-bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- bool result = (thread_local_.interrupt_flags_ & flag);
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
- return result;
-}
-
-char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access(isolate_);
- MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
- return to + sizeof(ThreadLocal);
-}
-
-char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access(isolate_);
- MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
- return from + sizeof(ThreadLocal);
-}
-
-void StackGuard::FreeThreadResources() {
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- per_thread->set_stack_limit(thread_local_.real_climit_);
-}
-
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- set_jslimit(kIllegalLimit);
- real_climit_ = kIllegalLimit;
- set_climit(kIllegalLimit);
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
-}
-
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
- uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
- real_climit_ = limit;
- set_climit(limit);
- should_set_stack_limits = true;
- }
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- uintptr_t stored_limit = per_thread->stack_limit();
- // You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != 0) {
- SetStackLimit(stored_limit);
- }
-}
-
-// --- C a l l s t o n a t i v e s ---
-
-Object StackGuard::HandleInterrupts() {
- TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
-
- if (FLAG_verify_predictable) {
- // Advance synthetic time by making a time request.
- isolate_->heap()->MonotonicallyIncreasingTimeInMs();
- }
-
- if (CheckAndClearInterrupt(GC_REQUEST)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
- isolate_->heap()->HandleGCRequest();
- }
-
- if (CheckAndClearInterrupt(GROW_SHARED_MEMORY)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "V8.WasmGrowSharedMemory");
- isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
- isolate_);
- }
-
- if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
- TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
- return isolate_->TerminateExecution();
- }
-
- if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "V8.GCDeoptMarkedAllocationSites");
- isolate_->heap()->DeoptMarkedAllocationSites();
- }
-
- if (CheckAndClearInterrupt(INSTALL_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.InstallOptimizedFunctions");
- DCHECK(isolate_->concurrent_recompilation_enabled());
- isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- }
-
- if (CheckAndClearInterrupt(API_INTERRUPT)) {
- TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
- // Callbacks must be invoked outside of ExecutionAccess lock.
- isolate_->InvokeApiInterruptCallbacks();
- }
-
- if (CheckAndClearInterrupt(LOG_WASM_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
- isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
- }
-
- if (CheckAndClearInterrupt(WASM_CODE_GC)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
- isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
- }
-
- isolate_->counters()->stack_interrupts()->Increment();
- isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->MarkCandidatesForOptimization();
-
- return ReadOnlyRoots(isolate_).undefined_value();
+ *isolate->c_entry_fp_address() = saved_c_entry_fp;
}
} // namespace internal
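The new Execution::CallWasm above saves c_entry_fp and js_entry_sp, pushes a C++-allocated StackHandlerMarker onto the isolate's handler chain, calls through the C-Wasm entry stub, and then restores everything it saved. A minimal standalone sketch of that push/call/pop pattern follows; the names and the thread-local variable are illustrative only, not V8 API.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct StackHandlerMarker {
  uintptr_t next;     // previous handler in the chain
  uintptr_t padding;  // real stack address under ASan, 0 otherwise
};
static_assert(offsetof(StackHandlerMarker, next) == 0, "next must come first");

thread_local uintptr_t g_handler_chain = 0;  // stand-in for thread_local_top()->handler_

template <typename Fn>
void CallWithHandler(Fn fn) {
  StackHandlerMarker handler;
  handler.next = g_handler_chain;  // remember the current chain head
  handler.padding = 0;
  g_handler_chain = reinterpret_cast<uintptr_t>(&handler);  // push
  fn();                                                     // call into the stub
  g_handler_chain = handler.next;                           // pop / restore
}

int main() {
  CallWithHandler([] { /* body runs with the marker installed */ });
  assert(g_handler_chain == 0);
  return 0;
}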
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index 48a8d64424..3b8ecf038d 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -5,7 +5,6 @@
#ifndef V8_EXECUTION_EXECUTION_H_
#define V8_EXECUTION_EXECUTION_H_
-#include "src/base/atomicops.h"
#include "src/common/globals.h"
namespace v8 {
@@ -60,174 +59,16 @@ class Execution final : public AllStatic {
static MaybeHandle<Object> TryRunMicrotasks(
Isolate* isolate, MicrotaskQueue* microtask_queue,
MaybeHandle<Object>* exception_out);
-};
-
-class ExecutionAccess;
-class InterruptsScope;
-
-// StackGuard contains the handling of the limits that are used to limit the
-// number of nested invocations of JavaScript and the stack size used in each
-// invocation.
-class V8_EXPORT_PRIVATE StackGuard final {
- public:
- explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
-
- // Pass the address beyond which the stack should not grow. The stack
- // is assumed to grow downwards.
- void SetStackLimit(uintptr_t limit);
-
- // The simulator uses a separate JS stack. Limits on the JS stack might have
- // to be adjusted in order to reflect overflows of the C stack, because we
- // cannot rely on the interleaving of frames on the simulator.
- void AdjustStackLimitForSimulator();
-
- // Threading support.
- char* ArchiveStackGuard(char* to);
- char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
- void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
- void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
-
-#define INTERRUPT_LIST(V) \
- V(TERMINATE_EXECUTION, TerminateExecution, 0) \
- V(GC_REQUEST, GC, 1) \
- V(INSTALL_CODE, InstallCode, 2) \
- V(API_INTERRUPT, ApiInterrupt, 3) \
- V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
- V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
- V(LOG_WASM_CODE, LogWasmCode, 6) \
- V(WASM_CODE_GC, WasmCodeGC, 7)
-
-#define V(NAME, Name, id) \
- inline bool Check##Name() { return CheckInterrupt(NAME); } \
- inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
- inline void Request##Name() { RequestInterrupt(NAME); } \
- inline void Clear##Name() { ClearInterrupt(NAME); }
- INTERRUPT_LIST(V)
-#undef V
-
- // Flag used to set the interrupt causes.
- enum InterruptFlag {
-#define V(NAME, Name, id) NAME = (1 << id),
- INTERRUPT_LIST(V)
-#undef V
-#define V(NAME, Name, id) NAME |
- ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
-#undef V
- };
-
- uintptr_t climit() { return thread_local_.climit(); }
- uintptr_t jslimit() { return thread_local_.jslimit(); }
- // This provides an asynchronous read of the stack limits for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- uintptr_t real_climit() { return thread_local_.real_climit_; }
- uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
- Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
- Address address_of_real_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
- }
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- Object HandleInterrupts();
-
- private:
- bool CheckInterrupt(InterruptFlag flag);
- void RequestInterrupt(InterruptFlag flag);
- void ClearInterrupt(InterruptFlag flag);
- bool CheckAndClearInterrupt(InterruptFlag flag);
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool has_pending_interrupts(const ExecutionAccess& lock) {
- return thread_local_.interrupt_flags_ != 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- inline void set_interrupt_limits(const ExecutionAccess& lock);
-
- // Reset limits to actual values. For example after handling interrupt.
- // You should hold the ExecutionAccess lock when calling this method.
- inline void reset_limits(const ExecutionAccess& lock);
-
- // Enable or disable interrupts.
- void EnableInterrupts();
- void DisableInterrupts();
-
-#if V8_TARGET_ARCH_64_BIT
- static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
- static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
-#else
- static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xfffffff8;
-#endif
-
- void PushInterruptsScope(InterruptsScope* scope);
- void PopInterruptsScope();
-
- class ThreadLocal final {
- public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
-
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize(Isolate* isolate);
-
- // The stack limit is split into a JavaScript and a C++ stack limit. These
- // two are the same except when running on a simulator where the C++ and
- // JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
- // set for the VM. The one without the real_ prefix has the same value as
- // the actual stack limit except when there is an interruption (e.g. debug
- // break or preemption) in which case it is lowered to make stack checks
- // fail. Both the generated code and the runtime system check against the
- // one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
-
- // jslimit_ and climit_ can be read without any lock.
- // Writing requires the ExecutionAccess lock.
- base::AtomicWord jslimit_;
- base::AtomicWord climit_;
-
- uintptr_t jslimit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
- }
- void set_jslimit(uintptr_t limit) {
- return base::Relaxed_Store(&jslimit_,
- static_cast<base::AtomicWord>(limit));
- }
- uintptr_t climit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
- }
- void set_climit(uintptr_t limit) {
- return base::Relaxed_Store(&climit_,
- static_cast<base::AtomicWord>(limit));
- }
-
- InterruptsScope* interrupt_scopes_;
- int interrupt_flags_;
- };
-
- // TODO(isolates): Technically this could be calculated directly from a
- // pointer to StackGuard.
- Isolate* isolate_;
- ThreadLocal thread_local_;
-
- friend class Isolate;
- friend class StackLimitCheck;
- friend class InterruptsScope;
- DISALLOW_COPY_AND_ASSIGN(StackGuard);
+ // Call a Wasm function identified by {wasm_call_target} through the
+ // provided {wrapper_code}, which must match the function's signature.
+ // Upon return, either isolate->has_pending_exception() is true, or
+ // the function's return values are in {packed_args}.
+ V8_EXPORT_PRIVATE static void CallWasm(Isolate* isolate,
+ Handle<Code> wrapper_code,
+ Address wasm_call_target,
+ Handle<Object> object_ref,
+ Address packed_args);
};
} // namespace internal
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 7ddee5689e..a6e5c9522c 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -249,6 +249,13 @@ class ConstructFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(5);
};
+class CWasmEntryFrameConstants : public TypedFrameConstants {
+ public:
+ // FP-relative:
+ static constexpr int kCEntryFPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ DEFINE_TYPED_FRAME_SIZES(1);
+};
+
class WasmCompiledFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -271,7 +278,7 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kBuiltinContextOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static constexpr int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ static constexpr int kBuiltinIndexOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
// The argument count is in the first allocatable register, stored below the
// fixed part of the frame and therefore is not part of the fixed frame size.
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index aeb43fe0a6..52f38857cc 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_INL_H_
#define V8_EXECUTION_FRAMES_INL_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
@@ -48,11 +48,11 @@ inline Address StackHandler::address() const {
inline StackHandler* StackHandler::next() const {
const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory<Address>(address() + offset));
+ return FromAddress(base::Memory<Address>(address() + offset));
}
inline Address StackHandler::next_address() const {
- return Memory<Address>(address() + StackHandlerConstants::kNextOffset);
+ return base::Memory<Address>(address() + StackHandlerConstants::kNextOffset);
}
inline StackHandler* StackHandler::FromAddress(Address address) {
@@ -112,21 +112,22 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
(argc - 1) * kSystemPointerSize;
- return Object(Memory<Address>(fp() + receiverOffset));
+ return Object(base::Memory<Address>(fp() + receiverOffset));
}
inline Object BuiltinExitFrame::argc_slot_object() const {
- return Object(Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
+ return Object(
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
}
inline Object BuiltinExitFrame::target_slot_object() const {
return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
}
inline Object BuiltinExitFrame::new_target_slot_object() const {
- return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kNewTargetOffset));
+ return Object(base::Memory<Address>(
+ fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
@@ -134,20 +135,20 @@ inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
}
inline Object StandardFrame::GetExpression(int index) const {
- return Object(Memory<Address>(GetExpressionAddress(index)));
+ return Object(base::Memory<Address>(GetExpressionAddress(index)));
}
inline void StandardFrame::SetExpression(int index, Object value) {
- Memory<Address>(GetExpressionAddress(index)) = value.ptr();
+ base::Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
inline Address StandardFrame::caller_fp() const {
- return Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
+ return base::Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
inline Address StandardFrame::caller_pc() const {
- return Memory<Address>(ComputePCAddress(fp()));
+ return base::Memory<Address>(ComputePCAddress(fp()));
}
@@ -163,14 +164,14 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
@@ -187,7 +188,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
}
inline void JavaScriptFrame::set_receiver(Object value) {
- Memory<Address>(GetParameterSlot(-1)) = value.ptr();
+ base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
inline bool JavaScriptFrame::has_adapted_arguments() const {
@@ -196,7 +197,7 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
inline Object JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Object(Memory<Address>(fp() + offset));
+ return Object(base::Memory<Address>(fp() + offset));
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index af660a338e..126cb9530e 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -33,6 +33,23 @@ namespace internal {
ReturnAddressLocationResolver StackFrame::return_address_location_resolver_ =
nullptr;
+namespace {
+
+Address AddressOf(const StackHandler* handler) {
+ Address raw = handler->address();
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // ASan puts C++-allocated StackHandler markers onto its fake stack.
+ // We work around that by storing the real stack address in the "padding"
+ // field. StackHandlers allocated from generated code have 0 as padding.
+ Address padding =
+ base::Memory<Address>(raw + StackHandlerConstants::kPaddingOffset);
+ if (padding != 0) return padding;
+#endif
+ return raw;
+}
+
+} // namespace
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator {
@@ -40,12 +57,18 @@ class StackHandlerIterator {
StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
: limit_(frame->fp()), handler_(handler) {
// Make sure the handler has already been unwound to this frame.
- DCHECK(frame->sp() <= handler->address());
+ DCHECK(frame->sp() <= AddressOf(handler));
+ // For CWasmEntry frames, the handler was registered by the last C++
+ // frame (Execution::CallWasm), so even though its address is already
+ // beyond the limit, we know we always want to unwind one handler.
+ if (frame->type() == StackFrame::C_WASM_ENTRY) {
+ handler_ = handler_->next();
+ }
}
StackHandler* handler() const { return handler_; }
- bool done() { return handler_ == nullptr || handler_->address() > limit_; }
+ bool done() { return handler_ == nullptr || AddressOf(handler_) > limit_; }
void Advance() {
DCHECK(!done());
handler_ = handler_->next();
@@ -146,7 +169,7 @@ StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
}
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate,
- StackFrame::Id id)
+ StackFrameId id)
: StackTraceFrameIterator(isolate) {
while (!done() && frame()->id() != id) Advance();
}
@@ -255,6 +278,11 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
bool advance_frame = true;
Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
+ uint8_t stack_is_iterable = isolate->isolate_data()->stack_is_iterable();
+ if (!stack_is_iterable) {
+ frame_ = nullptr;
+ return;
+ }
// 'Fast C calls' are a special type of C call where we call directly from JS
// to C without an exit frame inbetween. The CEntryStub is responsible for
// setting Isolate::c_entry_fp, meaning that it won't be set for fast C calls.
@@ -637,6 +665,12 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
return ExitFrame::GetStateForFramePointer(fp, state);
}
+StackFrame::Type CWasmEntryFrame::GetCallerState(State* state) const {
+ const int offset = CWasmEntryFrameConstants::kCEntryFPOffset;
+ Address fp = Memory<Address>(this->fp() + offset);
+ return ExitFrame::GetStateForFramePointer(fp, state);
+}
+
Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
@@ -972,7 +1006,6 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
parameters_limit);
}
- DEFINE_ROOT_VALUE(isolate());
// Visit pointer spill slots and locals.
uint8_t* safepoint_bits = safepoint_entry.bits();
for (unsigned index = 0; index < stack_slots; index++) {
@@ -992,7 +1025,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
- DecompressTaggedPointer(ROOT_VALUE, compressed_value);
+ DecompressTaggedPointer(isolate(), compressed_value);
}
#endif
v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
@@ -1910,7 +1943,8 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
- HandlerTable table(code->handler_table(), code->handler_table_size());
+ HandlerTable table(code->handler_table(), code->handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
int pc_offset = static_cast<int>(pc() - code->instruction_start());
*stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
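The AddressOf() helper added above exists because ASan can relocate the C++-allocated StackHandlerMarker onto its fake stack; the real stack position is then recovered from the handler's padding slot, which handlers pushed by generated code leave as zero. A hedged standalone illustration of that lookup, with placeholder types:

#include <cstdint>

struct Handler {
  uintptr_t next;
  uintptr_t padding;  // 0 for handlers pushed by generated code
};

// Prefer the recorded real stack position when the handler object itself may
// live on ASan's fake stack.
uintptr_t RealAddress(const Handler* h) {
  if (h->padding != 0) return h->padding;
  return reinterpret_cast<uintptr_t>(h);
}

int main() {
  Handler h{0, 0};
  return RealAddress(&h) == reinterpret_cast<uintptr_t>(&h) ? 0 : 1;
}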
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 982716db93..1f83984f97 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -6,6 +6,7 @@
#define V8_EXECUTION_FRAMES_H_
#include "src/codegen/safepoint-table.h"
+#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/code.h"
#include "src/objects/objects.h"
@@ -98,12 +99,6 @@ class StackFrame {
};
#undef DECLARE_TYPE
- // Opaque data type for identifying stack frames. Used extensively
- // by the debugger.
- // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
- // has correct value range (see Issue 830 for more details).
- enum Id { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
-
// Used to mark the outermost JS entry frame.
//
// The mark is an opaque value that should be pushed onto the stack directly,
@@ -112,7 +107,9 @@ class StackFrame {
INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
};
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
kHeapObjectTag);
@@ -145,7 +142,13 @@ class StackFrame {
// the type of the value on the stack.
static Type MarkerToType(intptr_t marker) {
DCHECK(IsTypeMarker(marker));
- return static_cast<Type>(marker >> kSmiTagSize);
+ intptr_t type = marker >> kSmiTagSize;
+ // TODO(petermarshall): There is a bug in the arm simulators that causes
+ // invalid frame markers.
+#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+ DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
+#endif
+ return static_cast<Type>(type);
}
// Check if a marker is a stack frame type marker or a tagged pointer.
@@ -172,10 +175,7 @@ class StackFrame {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
- bool is_wasm_exit() const { return type() == WASM_EXIT; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
- bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
- bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_wasm_interpreter_entry() const {
return type() == WASM_INTERPRETER_ENTRY;
}
@@ -234,7 +234,7 @@ class StackFrame {
}
// Get the id of this stack frame.
- Id id() const { return static_cast<Id>(caller_sp()); }
+ StackFrameId id() const { return static_cast<StackFrameId>(caller_sp()); }
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
@@ -1052,6 +1052,7 @@ class CWasmEntryFrame : public StubFrame {
private:
friend class StackFrameIteratorBase;
+ Type GetCallerState(State* state) const override;
};
class WasmCompileLazyFrame : public StandardFrame {
@@ -1259,7 +1260,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
- StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
+ StackTraceFrameIterator(Isolate* isolate, StackFrameId id);
bool done() const { return iterator_.done(); }
void Advance();
void AdvanceOneFrame() { iterator_.Advance(); }
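MarkerToType() above shifts the smi tag back out of the marker that TypeToMarker() produced, now with a debug range check (skipped on the ARM simulators because of the bug noted in the TODO). A standalone sketch of this smi-style encoding, where the kSmiTag/kSmiTagSize values are assumptions for illustration:

#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTag = 0;  // assumed value
constexpr int kSmiTagSize = 1;   // assumed value

constexpr intptr_t TypeToMarker(int type) {
  return (static_cast<intptr_t>(type) << kSmiTagSize) | kSmiTag;
}
constexpr int MarkerToType(intptr_t marker) {
  return static_cast<int>(marker >> kSmiTagSize);
}

int main() {
  for (int type = 0; type < 32; ++type) {
    assert(MarkerToType(TypeToMarker(type)) == type);
    assert((TypeToMarker(type) & 1) == kSmiTag);  // marker stays smi-tagged
  }
  return 0;
}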
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index e5e3855c79..7faecdb858 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_IA32
+#include "src/execution/ia32/frame-constants-ia32.h"
+
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/assembler-ia32-inl.h"
-#include "src/codegen/ia32/assembler-ia32.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/ia32/frame-constants-ia32.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
new file mode 100644
index 0000000000..cf8611f8d6
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/interrupts-scope.h"
+
+#include "src/execution/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode)
+ : stack_guard_(isolate->stack_guard()),
+ intercept_mask_(intercept_mask),
+ intercepted_flags_(0),
+ mode_(mode) {
+ if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
+}
+
+bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
+ InterruptsScope* last_postpone_scope = nullptr;
+ for (InterruptsScope* current = this; current; current = current->prev_) {
+ // We only consider scopes related to passed flag.
+ if (!(current->intercept_mask_ & flag)) continue;
+ if (current->mode_ == kRunInterrupts) {
+ // If innermost scope is kRunInterrupts scope, prevent interrupt from
+ // being intercepted.
+ break;
+ } else {
+ DCHECK_EQ(current->mode_, kPostponeInterrupts);
+ last_postpone_scope = current;
+ }
+ }
+ // If there is no postpone scope for passed flag then we should not intercept.
+ if (!last_postpone_scope) return false;
+ last_postpone_scope->intercepted_flags_ |= flag;
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
new file mode 100644
index 0000000000..3d74850a84
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -0,0 +1,72 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_INTERRUPTS_SCOPE_H_
+#define V8_EXECUTION_INTERRUPTS_SCOPE_H_
+
+#include "src/execution/stack-guard.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// A scope intercepts only the interrupts in its intercept mask and does not
+// affect other interrupts.
+class InterruptsScope {
+ public:
+ enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
+
+ V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode);
+
+ virtual ~InterruptsScope() {
+ if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
+ }
+
+ // Find the scope that intercepts this interrupt.
+ // It may be outermost PostponeInterruptsScope or innermost
+ // SafeForInterruptsScope if any.
+ // Return whether the interrupt has been intercepted.
+ bool Intercept(StackGuard::InterruptFlag flag);
+
+ private:
+ StackGuard* stack_guard_;
+ int intercept_mask_;
+ int intercepted_flags_;
+ Mode mode_;
+ InterruptsScope* prev_;
+
+ friend class StackGuard;
+};
+
+// Support for temporarily postponing interrupts. When the outermost
+// postpone scope is left the interrupts will be re-enabled and any
+// interrupts that occurred while in the scope will be taken into
+// account.
+class PostponeInterruptsScope : public InterruptsScope {
+ public:
+ PostponeInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kPostponeInterrupts) {}
+ ~PostponeInterruptsScope() override = default;
+};
+
+// Support for overriding PostponeInterruptsScope. An interrupt is not ignored
+// if the innermost scope covering it is a SafeForInterruptsScope, regardless
+// of any outer PostponeInterruptsScopes.
+class SafeForInterruptsScope : public InterruptsScope {
+ public:
+ SafeForInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kRunInterrupts) {}
+ ~SafeForInterruptsScope() override = default;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_INTERRUPTS_SCOPE_H_
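InterruptsScope::Intercept() in the new interrupts-scope.cc walks the scope chain from the innermost scope outwards: a kRunInterrupts scope covering the flag stops interception, otherwise the flag is parked on the outermost covering kPostponeInterrupts scope. A self-contained sketch of that rule with simplified types (not V8 code):

#include <cassert>

struct Scope {
  enum Mode { kPostpone, kRun };
  Scope(Mode m, int mask) : mode(m), mask(mask) {}
  Mode mode;
  int mask;
  int intercepted = 0;
  Scope* prev = nullptr;
};

bool Intercept(Scope* innermost, int flag) {
  Scope* last_postpone = nullptr;
  for (Scope* s = innermost; s != nullptr; s = s->prev) {
    if (!(s->mask & flag)) continue;    // scope does not cover this flag
    if (s->mode == Scope::kRun) break;  // innermost covering scope runs it
    last_postpone = s;                  // remember the outermost postpone scope
  }
  if (last_postpone == nullptr) return false;
  last_postpone->intercepted |= flag;   // park the flag for later delivery
  return true;
}

int main() {
  Scope outer(Scope::kPostpone, 0xFF);
  Scope inner(Scope::kRun, 0xFF);
  inner.prev = &outer;
  assert(!Intercept(&inner, 1));  // a run scope on top: not intercepted
  assert(Intercept(&outer, 1));   // postpone scope alone: intercepted
  return 0;
}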
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index d83ae708ec..adeb7f54d3 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -81,8 +81,10 @@ class IsolateData final {
// The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+ uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
+ uint8_t stack_is_iterable() { return stack_is_iterable_; }
// Returns true if this address points to data stored in this instance.
// If it's the case then the value can be accessed indirectly through the
@@ -121,6 +123,7 @@ class IsolateData final {
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+ V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
@@ -172,6 +175,9 @@ class IsolateData final {
// instruction in compiled code.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
+ // Whether the SafeStackFrameIterator can successfully iterate the current
+ // stack. Only valid values are 0 or 1.
+ uint8_t stack_is_iterable_ = 1;
// Ensure the size is 8-byte aligned in order to make alignment of the field
// following the IsolateData field predictable. This solves the issue with
@@ -219,6 +225,8 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
+ STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
+ kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}
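The new kStackIsIterableOffset entry follows the existing IsolateData pattern: every field that generated code addresses by offset is pinned with a STATIC_ASSERT in AssertPredictableLayout(). A small standalone example of the same offset-pinning idea; the struct, field names, and offsets here are illustrative only:

#include <cstddef>
#include <cstdint>

struct IsolateDataLike {
  uintptr_t fast_c_call_caller_fp = 0;
  uintptr_t fast_c_call_caller_pc = 0;
  uint8_t stack_is_iterable = 1;  // read by stack iterators; only 0 or 1
};

// Pin the offsets that hand-written or generated assembly would rely on.
static_assert(offsetof(IsolateDataLike, fast_c_call_caller_pc) == sizeof(uintptr_t),
              "unexpected pc offset");
static_assert(offsetof(IsolateDataLike, stack_is_iterable) == 2 * sizeof(uintptr_t),
              "unexpected flag offset");

int main() { return 0; }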
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index fcbbed139c..7e037fb410 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -145,9 +145,10 @@ bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
-bool Isolate::IsRegExpSpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kRegExpSpeciesProtector));
+bool Isolate::IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ PropertyCell species_cell = native_context->regexp_species_protector();
return species_cell.value().IsSmi() &&
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
new file mode 100644
index 0000000000..6095970a31
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_INL_H_
+#define V8_EXECUTION_ISOLATE_UTILS_INL_H_
+
+#include "src/execution/isolate-utils.h"
+
+#include "src/common/ptr-compr-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap-write-barrier-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline Isolate* GetIsolateForPtrCompr(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+#else
+ return nullptr;
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return GetIsolateFromWritableObject(object)->heap();
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ return chunk->GetHeap();
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ DCHECK_NOT_NULL(isolate);
+ return isolate;
+#else
+ return Isolate::FromHeap(GetHeapFromWritableObject(object));
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
+#ifdef V8_COMPRESS_POINTERS
+ *isolate = GetIsolateFromWritableObject(object);
+ return true;
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (chunk->InReadOnlySpace()) {
+ *isolate = nullptr;
+ return false;
+ }
+ *isolate = Isolate::FromHeap(chunk->GetHeap());
+ return true;
+#endif // V8_COMPRESS_POINTERS
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_INL_H_
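Without pointer compression, GetIsolateFromHeapObject() above falls back to the object's MemoryChunk to find the owning heap, failing only for read-only chunks. A rough standalone sketch of mapping a heap pointer back to per-chunk metadata via alignment; the chunk size and header layout are assumptions, not V8's actual values:

#include <cstdint>

constexpr uintptr_t kChunkSize = 256 * 1024;  // assumed alignment

struct ChunkHeader {
  void* heap;              // owning heap, recorded when the chunk is allocated
  bool in_read_only_space;
};

inline ChunkHeader* ChunkFromPointer(uintptr_t addr) {
  // Chunks are kChunkSize-aligned, so masking the low bits finds the header.
  return reinterpret_cast<ChunkHeader*>(addr & ~(kChunkSize - 1));
}

// Mirrors the shape of GetIsolateFromHeapObject() without pointer compression:
// fail (return false) when the object lives in read-only space.
inline bool HeapFromObject(uintptr_t object_addr, void** heap_out) {
  ChunkHeader* chunk = ChunkFromPointer(object_addr);
  if (chunk->in_read_only_space) return false;
  *heap_out = chunk->heap;
  return true;
}

int main() { return 0; }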
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
new file mode 100644
index 0000000000..31c154e7a4
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_H_
+#define V8_EXECUTION_ISOLATE_UTILS_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Computes isolate from any read only or writable heap object. The resulting
+// value is intended to be used only as a hoisted computation of isolate root
+// inside trivial accessors for optimizing value decompression.
+// When pointer compression is disabled this function always returns nullptr.
+V8_INLINE Isolate* GetIsolateForPtrCompr(HeapObject object);
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object);
+
+// Returns true if the isolate could be obtained from the given object.
+// If it fails, the object is definitely a read-only object; with pointer
+// compression enabled it also succeeds for read-only objects.
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_H_
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8a8db12ca3..2b3551cdfb 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -736,15 +736,19 @@ class FrameArrayBuilder {
}
// Creates a StackTraceFrame object for each frame in the FrameArray.
- Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
+ Handle<FixedArray> GetElementsAsStackTraceFrameArray(
+ bool enable_frame_caching) {
elements_->ShrinkToFit(isolate_);
const int frame_count = elements_->FrameCount();
Handle<FixedArray> stack_trace =
isolate_->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; ++i) {
- // Caching stack frames only happens for non-Wasm frames.
- if (!elements_->IsAnyWasmFrame(i)) {
+ // Caching stack frames only happens for user JS frames.
+ const bool cache_frame =
+ enable_frame_caching && !elements_->IsAnyWasmFrame(i) &&
+ elements_->Function(i).shared().IsUserJavaScript();
+ if (cache_frame) {
MaybeHandle<StackTraceFrame> maybe_frame =
StackFrameCacheHelper::LookupCachedFrame(
isolate_, handle(elements_->Code(i), isolate_),
@@ -760,7 +764,7 @@ class FrameArrayBuilder {
isolate_->factory()->NewStackTraceFrame(elements_, i);
stack_trace->set(i, *frame);
- if (!elements_->IsAnyWasmFrame(i)) {
+ if (cache_frame) {
StackFrameCacheHelper::CacheFrameAndUpdateCache(
isolate_, handle(elements_->Code(i), isolate_),
Smi::ToInt(elements_->Offset(i)), frame);
@@ -938,6 +942,14 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseCapability::cast(context->get(index)), isolate);
if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kPromiseCapabilityDefaultResolve)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+ promise =
+ handle(JSPromise::cast(context->get(PromiseBuiltins::kPromiseSlot)),
+ isolate);
} else {
// We have some generic promise chain here, so try to
// continue with the chained promise on the reaction
@@ -973,9 +985,7 @@ struct CaptureStackTraceOptions {
bool capture_builtin_exit_frames;
bool capture_only_frames_subject_to_debugging;
bool async_stack_trace;
-
- enum CaptureResult { RAW_FRAME_ARRAY, STACK_TRACE_FRAME_ARRAY };
- CaptureResult capture_result;
+ bool enable_frame_caching;
};
Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
@@ -1105,10 +1115,8 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
- if (options.capture_result == CaptureStackTraceOptions::RAW_FRAME_ARRAY) {
- return builder.GetElements();
- }
- return builder.GetElementsAsStackTraceFrameArray();
+ return builder.GetElementsAsStackTraceFrameArray(
+ options.enable_frame_caching);
}
} // namespace
@@ -1126,7 +1134,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
options.async_stack_trace = FLAG_async_stack_traces;
options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = false;
- options.capture_result = CaptureStackTraceOptions::RAW_FRAME_ARRAY;
+ options.enable_frame_caching = false;
return CaptureStackTrace(this, caller, options);
}
@@ -1222,7 +1230,7 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
? FrameArrayBuilder::ALL
: FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = true;
- options.capture_result = CaptureStackTraceOptions::STACK_TRACE_FRAME_ARRAY;
+ options.enable_frame_caching = true;
return Handle<FixedArray>::cast(
CaptureStackTrace(this, factory()->undefined_value(), options));
@@ -1377,7 +1385,8 @@ Object Isolate::StackOverflow() {
Handle<Object> exception;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
this, exception,
- ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller, true));
+ ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kSimple));
Throw(*exception, nullptr);
@@ -1621,7 +1630,12 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->pending_handler_fp_ = handler_fp;
thread_local_top()->pending_handler_sp_ = handler_sp;
- // Return and clear pending exception.
+ // Return and clear pending exception. The contract is that:
+ // (1) the pending exception is stored in one place (no duplication), and
+ // (2) within generated-code land, that one place is the return register.
+ // If/when we unwind back into C++ (returning to the JSEntry stub,
+ // or to Execution::CallWasm), the returned exception will be sent
+ // back to isolate->set_pending_exception(...).
clear_pending_exception();
return exception;
};
@@ -1656,6 +1670,19 @@ Object Isolate::UnwindAndFindHandler() {
0);
}
+ case StackFrame::C_WASM_ENTRY: {
+ StackHandler* handler = frame->top_handler();
+ thread_local_top()->handler_ = handler->next_address();
+ Code code = frame->LookupCode();
+ HandlerTable table(code);
+ Address instruction_start = code.InstructionStart();
+ int return_offset = static_cast<int>(frame->pc() - instruction_start);
+ int handler_offset = table.LookupReturn(return_offset);
+ DCHECK_NE(-1, handler_offset);
+ return FoundHandler(Context(), instruction_start, handler_offset,
+ code.constant_pool(), frame->sp(), frame->fp());
+ }
+
case StackFrame::WASM_COMPILED: {
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
@@ -2014,33 +2041,23 @@ Object Isolate::PromoteScheduledException() {
}
void Isolate::PrintCurrentStackTrace(FILE* out) {
- IncrementalStringBuilder builder(this);
- for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
- if (!it.is_javascript()) continue;
+ CaptureStackTraceOptions options;
+ options.limit = 0;
+ options.skip_mode = SKIP_NONE;
+ options.capture_builtin_exit_frames = true;
+ options.async_stack_trace = FLAG_async_stack_traces;
+ options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ options.capture_only_frames_subject_to_debugging = false;
+ options.enable_frame_caching = false;
- HandleScope scope(this);
- JavaScriptFrame* frame = it.javascript_frame();
-
- Handle<Object> receiver(frame->receiver(), this);
- Handle<JSFunction> function(frame->function(), this);
- Handle<AbstractCode> code;
- int offset;
- if (frame->is_interpreted()) {
- InterpretedFrame* interpreted_frame = InterpretedFrame::cast(frame);
- code = handle(AbstractCode::cast(interpreted_frame->GetBytecodeArray()),
- this);
- offset = interpreted_frame->GetBytecodeOffset();
- } else {
- code = handle(AbstractCode::cast(frame->LookupCode()), this);
- offset = static_cast<int>(frame->pc() - code->InstructionStart());
- }
+ Handle<FixedArray> frames = Handle<FixedArray>::cast(
+ CaptureStackTrace(this, this->factory()->undefined_value(), options));
- // To preserve backwards compatiblity, only append a newline when
- // the current stringified frame actually has characters.
- const int old_length = builder.Length();
- JSStackFrame site(this, receiver, function, code, offset);
- site.ToString(builder);
- if (old_length != builder.Length()) builder.AppendCharacter('\n');
+ IncrementalStringBuilder builder(this);
+ for (int i = 0; i < frames->length(); ++i) {
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
+
+ SerializeStackTraceFrame(this, frame, builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2113,7 +2130,8 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
if (!property->IsFixedArray()) return false;
- Handle<FrameArray> elements = Handle<FrameArray>::cast(property);
+ Handle<FrameArray> elements =
+ GetFrameArrayFromStackTrace(this, Handle<FixedArray>::cast(property));
const int frame_count = elements->FrameCount();
for (int i = 0; i < frame_count; i++) {
@@ -2248,7 +2266,7 @@ bool Isolate::IsExternalHandlerOnTop(Object exception) {
}
void Isolate::ReportPendingMessagesImpl(bool report_externally) {
- Object exception = pending_exception();
+ Object exception_obj = pending_exception();
// Clear the pending message object early to avoid endless recursion.
Object message_obj = thread_local_top()->pending_message_obj_;
@@ -2256,7 +2274,7 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
// For uncatchable exceptions we do nothing. If needed, the exception and the
// message have already been propagated to v8::TryCatch.
- if (!is_catchable_by_javascript(exception)) return;
+ if (!is_catchable_by_javascript(exception_obj)) return;
// Determine whether the message needs to be reported to all message handlers
 depending on whether an external v8::TryCatch or an internal JavaScript
@@ -2267,19 +2285,20 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
should_report_exception = try_catch_handler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
- should_report_exception = !IsJavaScriptHandlerOnTop(exception);
+ should_report_exception = !IsJavaScriptHandlerOnTop(exception_obj);
}
// Actually report the pending message to all message handlers.
if (!message_obj.IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
+ Handle<Object> exception(exception_obj, this);
Handle<Script> script(message->script(), this);
// Clear the exception and restore it afterwards, otherwise
// CollectSourcePositions will abort.
clear_pending_exception();
JSMessageObject::EnsureSourcePositionsAvailable(this, message);
- set_pending_exception(exception);
+ set_pending_exception(*exception);
int start_pos = message->GetStartPosition();
int end_pos = message->GetEndPosition();
MessageLocation location(script, start_pos, end_pos);
@@ -2853,6 +2872,13 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(saved_isolate, saved_data);
}
+void Isolate::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
+ DCHECK_NOT_NULL(ro_heap);
+ DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
+ read_only_heap_ = ro_heap;
+ heap_.SetUpFromReadOnlyHeap(ro_heap);
+}
+
v8::PageAllocator* Isolate::page_allocator() {
return isolate_allocator_->page_allocator();
}
@@ -3282,6 +3308,21 @@ bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
return Init(read_only_deserializer, startup_deserializer);
}
+static void AddCrashKeysForIsolateAndHeapPointers(Isolate* isolate) {
+ v8::Platform* platform = V8::GetCurrentPlatform();
+
+ const int id = isolate->id();
+ platform->AddCrashKey(id, "isolate", reinterpret_cast<uintptr_t>(isolate));
+
+ auto heap = isolate->heap();
+ platform->AddCrashKey(id, "ro_space",
+ reinterpret_cast<uintptr_t>(heap->read_only_space()->first_page()));
+ platform->AddCrashKey(id, "map_space",
+ reinterpret_cast<uintptr_t>(heap->map_space()->first_page()));
+ platform->AddCrashKey(id, "code_space",
+ reinterpret_cast<uintptr_t>(heap->code_space()->first_page()));
+}
+
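AddCrashKey is a v8::Platform hook, so whether these keys reach a crash reporter depends on the embedder. A hedged sketch of an embedder-side override that would record them (the class name and plain logging are illustrative; the remaining v8::Platform methods are elided, so this is not a complete Platform implementation):

    #include <cinttypes>
    #include <cstdio>
    #include "v8-platform.h"

    class CrashKeyLoggingPlatform : public v8::Platform {
     public:
      void AddCrashKey(int id, const char* name, uintptr_t value) override {
        // Matches the calls above: name = "isolate", "ro_space", ...;
        // id = isolate->id(); value = the raw pointer being recorded.
        std::fprintf(stderr, "%s-%d = 0x%" PRIxPTR "\n", name, id, value);
      }
      // ... the remaining (pure virtual) v8::Platform methods are omitted ...
    };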
bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
StartupDeserializer* startup_deserializer) {
TRACE_ISOLATE(init);
@@ -3432,7 +3473,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
- heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
+ read_only_heap_->OnCreateHeapObjectsComplete(this);
} else {
startup_deserializer->DeserializeInto(this);
}
@@ -3527,6 +3568,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
+ AddCrashKeysForIsolateAndHeapPointers(this);
return true;
}
@@ -3693,9 +3735,9 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
std::vector<Handle<FeedbackVector>> vectors;
{
- HeapIterator heap_iterator(heap());
- for (HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
- current_obj = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(heap());
+ for (HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
+ current_obj = heap_iterator.Next()) {
if (!current_obj.IsFeedbackVector()) continue;
FeedbackVector vector = FeedbackVector::cast(current_obj);
@@ -3907,13 +3949,31 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
if (!IsNoElementsProtectorIntact()) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
- this, factory()->no_elements_protector(),
+ this, "no_elements_protector", factory()->no_elements_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
}
+void Isolate::TraceProtectorInvalidation(const char* protector_name) {
+ static constexpr char kInvalidateProtectorTracingCategory[] =
+ "V8.InvalidateProtector";
+ static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
+
+ DCHECK(FLAG_trace_protector_invalidation);
+
+ // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
+ i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name,
+ this);
+ TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
+ TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
+ protector_name);
+}
+
void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("is_concat_spreadable_protector");
+ }
factory()->is_concat_spreadable_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsIsConcatSpreadableLookupChainIntact());
@@ -3922,6 +3982,9 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(factory()->array_constructor_protector()->value().IsSmi());
DCHECK(IsArrayConstructorIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("array_constructor_protector");
+ }
factory()->array_constructor_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsArrayConstructorIntact());
@@ -3931,7 +3994,7 @@ void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->array_species_protector()->value().IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_species_protector(),
+ this, "array_species_protector", factory()->array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArraySpeciesLookupChainIntact());
}
@@ -3940,25 +4003,30 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->typed_array_species_protector(),
+ this, "typed_array_species_protector",
+ factory()->typed_array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
-void Isolate::InvalidateRegExpSpeciesProtector() {
- DCHECK(factory()->regexp_species_protector()->value().IsSmi());
- DCHECK(IsRegExpSpeciesLookupChainIntact());
+void Isolate::InvalidateRegExpSpeciesProtector(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ DCHECK(native_context->regexp_species_protector().value().IsSmi());
+ DCHECK(IsRegExpSpeciesLookupChainIntact(native_context));
+ Handle<PropertyCell> species_cell(native_context->regexp_species_protector(),
+ this);
PropertyCell::SetValueWithInvalidation(
- this, factory()->regexp_species_protector(),
+ this, "regexp_species_protector", species_cell,
handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsRegExpSpeciesLookupChainIntact());
+ DCHECK(!IsRegExpSpeciesLookupChainIntact(native_context));
}
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value().IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_species_protector(),
+ this, "promise_species_protector", factory()->promise_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseSpeciesLookupChainIntact());
}
@@ -3966,6 +4034,9 @@ void Isolate::InvalidatePromiseSpeciesProtector() {
void Isolate::InvalidateStringLengthOverflowProtector() {
DCHECK(factory()->string_length_protector()->value().IsSmi());
DCHECK(IsStringLengthOverflowIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("string_length_protector");
+ }
factory()->string_length_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsStringLengthOverflowIntact());
@@ -3975,7 +4046,7 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(factory()->array_iterator_protector()->value().IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_iterator_protector(),
+ this, "array_iterator_protector", factory()->array_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayIteratorLookupChainIntact());
}
@@ -3984,7 +4055,7 @@ void Isolate::InvalidateMapIteratorProtector() {
DCHECK(factory()->map_iterator_protector()->value().IsSmi());
DCHECK(IsMapIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->map_iterator_protector(),
+ this, "map_iterator_protector", factory()->map_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsMapIteratorLookupChainIntact());
}
@@ -3993,7 +4064,7 @@ void Isolate::InvalidateSetIteratorProtector() {
DCHECK(factory()->set_iterator_protector()->value().IsSmi());
DCHECK(IsSetIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->set_iterator_protector(),
+ this, "set_iterator_protector", factory()->set_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsSetIteratorLookupChainIntact());
}
@@ -4002,7 +4073,7 @@ void Isolate::InvalidateStringIteratorProtector() {
DCHECK(factory()->string_iterator_protector()->value().IsSmi());
DCHECK(IsStringIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->string_iterator_protector(),
+ this, "string_iterator_protector", factory()->string_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsStringIteratorLookupChainIntact());
}
@@ -4011,7 +4082,8 @@ void Isolate::InvalidateArrayBufferDetachingProtector() {
DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
DCHECK(IsArrayBufferDetachingIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_buffer_detaching_protector(),
+ this, "array_buffer_detaching_protector",
+ factory()->array_buffer_detaching_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayBufferDetachingIntact());
}
@@ -4020,7 +4092,7 @@ void Isolate::InvalidatePromiseHookProtector() {
DCHECK(factory()->promise_hook_protector()->value().IsSmi());
DCHECK(IsPromiseHookProtectorIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_hook_protector(),
+ this, "promise_hook_protector", factory()->promise_hook_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseHookProtectorIntact());
}
@@ -4028,6 +4100,9 @@ void Isolate::InvalidatePromiseHookProtector() {
void Isolate::InvalidatePromiseResolveProtector() {
DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
DCHECK(IsPromiseResolveLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("promise_resolve_protector");
+ }
factory()->promise_resolve_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsPromiseResolveLookupChainIntact());
@@ -4037,7 +4112,7 @@ void Isolate::InvalidatePromiseThenProtector() {
DCHECK(factory()->promise_then_protector()->value().IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_then_protector(),
+ this, "promise_then_protector", factory()->promise_then_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseThenLookupChainIntact());
}
@@ -4176,7 +4251,7 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- heap()->ClearKeepDuringJobSet();
+ heap()->ClearKeptObjects();
}
if (call_completed_callbacks_.empty()) return;
@@ -4261,7 +4336,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
}
Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module) {
+ Handle<SourceTextModule> module) {
Handle<Object> host_meta(module->import_meta(), this);
if (host_meta->IsTheHole(this)) {
host_meta = factory()->NewJSObjectWithNullProto();
@@ -4269,7 +4344,7 @@ Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>(native_context()));
host_initialize_import_meta_object_callback_(
- api_context, Utils::ToLocal(module),
+ api_context, Utils::ToLocal(Handle<Module>::cast(module)),
v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
}
module->set_import_meta(*host_meta);
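On the other side of this call sits the embedder's HostInitializeImportMetaObjectCallback. A hedged sketch of such a callback (the "url" property and its value are illustrative, not mandated by V8):

    void InitializeImportMeta(v8::Local<v8::Context> context,
                              v8::Local<v8::Module> module,
                              v8::Local<v8::Object> meta) {
      v8::Isolate* isolate = context->GetIsolate();
      // A common embedder choice: expose the module's URL on import.meta.
      // `module` identifies which module's import.meta is being set up.
      meta->CreateDataProperty(
              context,
              v8::String::NewFromUtf8(isolate, "url", v8::NewStringType::kNormal)
                  .ToLocalChecked(),
              v8::String::NewFromUtf8(isolate, "file:///example.mjs",
                                      v8::NewStringType::kNormal)
                  .ToLocalChecked())
          .Check();
    }
    // Registered with:
    //   isolate->SetHostInitializeImportMetaObjectCallback(InitializeImportMeta);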
@@ -4641,26 +4716,6 @@ AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
#endif // DEBUG
-bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
- InterruptsScope* last_postpone_scope = nullptr;
- for (InterruptsScope* current = this; current; current = current->prev_) {
- // We only consider scopes related to passed flag.
- if (!(current->intercept_mask_ & flag)) continue;
- if (current->mode_ == kRunInterrupts) {
- // If innermost scope is kRunInterrupts scope, prevent interrupt from
- // being intercepted.
- break;
- } else {
- DCHECK_EQ(current->mode_, kPostponeInterrupts);
- last_postpone_scope = current;
- }
- }
- // If there is no postpone scope for passed flag then we should not intercept.
- if (!last_postpone_scope) return false;
- last_postpone_scope->intercepted_flags_ |= flag;
- return true;
-}
-
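The interception rule implemented above is removed from isolate.cc in this change; it is easiest to follow with a concrete scope chain. The walk below is illustrative; the flag names are StackGuard's:

    // Intercept(TERMINATE_EXECUTION), walking the chain innermost -> outermost:
    //   S1: kPostponeInterrupts, mask = GC_REQUEST           -> mask miss, skip
    //   S2: kRunInterrupts,      mask = TERMINATE_EXECUTION  -> innermost relevant scope
    //                                                           runs interrupts: return false
    //
    // The same walk without the run scope:
    //   S1: kPostponeInterrupts, mask = GC_REQUEST           -> mask miss, skip
    //   S2: kPostponeInterrupts, mask = TERMINATE_EXECUTION  -> outermost postpone so far
    //   Result: S2->intercepted_flags_ |= TERMINATE_EXECUTION; return true.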
#undef TRACE_ISOLATE
} // namespace internal
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 4b4bf9cd7c..2ead7bf844 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -23,6 +23,7 @@
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
+#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
@@ -69,7 +70,6 @@ class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
-class ContextSlotCache;
class Counters;
class Debug;
class DeoptimizerData;
@@ -91,8 +91,8 @@ class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
-class StartupDeserializer;
class StandardFrame;
+class StartupDeserializer;
class StubCache;
class ThreadManager;
class ThreadState;
@@ -397,6 +397,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(OOMErrorCallback, oom_behavior, nullptr) \
V(LogEventCallback, event_logger, nullptr) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
+ nullptr) \
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
@@ -515,6 +517,8 @@ class Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
+ void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
@@ -900,6 +904,7 @@ class Isolate final : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+ ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
static Isolate* FromHeap(Heap* heap) {
return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
OFFSET_OF(Isolate, heap_));
@@ -915,6 +920,9 @@ class Isolate final : private HiddenFactory {
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
+ static Isolate* FromRoot(Address isolate_root) {
+ return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
+ }
RootsTable& roots_table() { return isolate_data()->roots(); }
@@ -1168,7 +1176,8 @@ class Isolate final : private HiddenFactory {
inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
- inline bool IsRegExpSpeciesLookupChainIntact();
+ inline bool IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context);
// Check that the @@species protector is intact, which guards the lookup of
// "constructor" on JSPromise instances, whose [[Prototype]] is the initial
@@ -1250,10 +1259,14 @@ class Isolate final : private HiddenFactory {
void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateNoElementsProtectorOnSetElement(object);
}
+
+ // The `protector_name` C string must be statically allocated.
+ void TraceProtectorInvalidation(const char* protector_name);
+
void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
void InvalidateTypedArraySpeciesProtector();
- void InvalidateRegExpSpeciesProtector();
+ void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
void InvalidatePromiseSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
@@ -1469,7 +1482,7 @@ class Isolate final : private HiddenFactory {
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
V8_EXPORT_PRIVATE Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module);
+ Handle<SourceTextModule> module);
void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
embedded_file_writer_ = writer;
@@ -1647,6 +1660,7 @@ class Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
+ ReadOnlyHeap* read_only_heap_ = nullptr;
const int id_;
EntryStackItem* entry_stack_ = nullptr;
@@ -1982,65 +1996,6 @@ class StackLimitCheck {
} \
} while (false)
-// Scope intercepts only interrupt which is part of its interrupt_mask and does
-// not affect other interrupts.
-class InterruptsScope {
- public:
- enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
-
- virtual ~InterruptsScope() {
- if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
- }
-
- // Find the scope that intercepts this interrupt.
- // It may be outermost PostponeInterruptsScope or innermost
- // SafeForInterruptsScope if any.
- // Return whether the interrupt has been intercepted.
- bool Intercept(StackGuard::InterruptFlag flag);
-
- InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
- : stack_guard_(isolate->stack_guard()),
- intercept_mask_(intercept_mask),
- intercepted_flags_(0),
- mode_(mode) {
- if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
- }
-
- private:
- StackGuard* stack_guard_;
- int intercept_mask_;
- int intercepted_flags_;
- Mode mode_;
- InterruptsScope* prev_;
-
- friend class StackGuard;
-};
-
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope : public InterruptsScope {
- public:
- PostponeInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kPostponeInterrupts) {}
- ~PostponeInterruptsScope() override = default;
-};
-
-// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
-// innermost scope is SafeForInterruptsScope ignoring any outer
-// PostponeInterruptsScopes.
-class SafeForInterruptsScope : public InterruptsScope {
- public:
- SafeForInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kRunInterrupts) {}
- ~SafeForInterruptsScope() override = default;
-};
-
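Both scope classes are dropped from isolate.h here. A brief usage sketch of the RAII semantics their comments describe, with hypothetical nesting:

    {
      PostponeInterruptsScope postpone(isolate);
      // An interrupt requested here (e.g. TERMINATE_EXECUTION) is intercepted
      // and recorded on this scope instead of being serviced.
      {
        SafeForInterruptsScope safe(isolate);
        // The innermost scope wins: the same request here is serviced normally.
      }
    }
    // Leaving the outermost postpone scope re-arms any intercepted interrupts.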
class StackTraceFailureMessage {
public:
explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
diff --git a/deps/v8/src/execution/message-template.h b/deps/v8/src/execution/message-template.h
deleted file mode 100644
index ae88aa4411..0000000000
--- a/deps/v8/src/execution/message-template.h
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_
-#define V8_EXECUTION_MESSAGE_TEMPLATE_H_
-
-#include "src/base/logging.h"
-
-namespace v8 {
-namespace internal {
-
-// TODO(913887): fix the use of 'neuter' in these error messages.
-#define MESSAGE_TEMPLATES(T) \
- /* Error */ \
- T(None, "") \
- T(CyclicProto, "Cyclic __proto__ value") \
- T(Debugger, "Debugger: %") \
- T(DebuggerLoading, "Error loading debugger") \
- T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
- T(DeletePrivateField, "Private fields can not be deleted") \
- T(UncaughtException, "Uncaught %") \
- T(Unsupported, "Not supported") \
- T(WrongServiceType, "Internal error, wrong service type: %") \
- T(WrongValueType, "Internal error. Wrong value type.") \
- T(IcuError, "Internal error. Icu error.") \
- /* TypeError */ \
- T(ApplyNonFunction, \
- "Function.prototype.apply was called on %, which is a % and not a " \
- "function") \
- T(ArgumentsDisallowedInInitializer, \
- "'arguments' is not allowed in class field initializer") \
- T(ArrayBufferTooShort, \
- "Derived ArrayBuffer constructor created a buffer which was too small") \
- T(ArrayBufferSpeciesThis, \
- "ArrayBuffer subclass returned this from species constructor") \
- T(ArrayItemNotType, "array %[%] is not type %") \
- T(AwaitNotInAsyncFunction, "await is only valid in async function") \
- T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
- T(BadSortComparisonFunction, \
- "The comparison function must be either a function or undefined") \
- T(BigIntFromNumber, \
- "The number % cannot be converted to a BigInt because it is not an " \
- "integer") \
- T(BigIntFromObject, "Cannot convert % to a BigInt") \
- T(BigIntMixedTypes, \
- "Cannot mix BigInt and other types, use explicit conversions") \
- T(BigIntSerializeJSON, "Do not know how to serialize a BigInt") \
- T(BigIntShr, "BigInts have no unsigned right shift, use >> instead") \
- T(BigIntToNumber, "Cannot convert a BigInt value to a number") \
- T(CalledNonCallable, "% is not a function") \
- T(CalledOnNonObject, "% called on non-object") \
- T(CalledOnNullOrUndefined, "% called on null or undefined") \
- T(CallSiteExpectsFunction, \
- "CallSite expects wasm object as first or function as second argument, " \
- "got <%, %>") \
- T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
- T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
- T(CannotPreventExt, "Cannot prevent extensions") \
- T(CannotFreeze, "Cannot freeze") \
- T(CannotFreezeArrayBufferView, \
- "Cannot freeze array buffer views with elements") \
- T(CannotSeal, "Cannot seal") \
- T(CircularStructure, "Converting circular structure to JSON%") \
- T(ConstructAbstractClass, "Abstract class % not directly constructable") \
- T(ConstAssign, "Assignment to constant variable.") \
- T(ConstructorClassField, "Classes may not have a field named 'constructor'") \
- T(ConstructorNonCallable, \
- "Class constructor % cannot be invoked without 'new'") \
- T(ConstructorNotFunction, "Constructor % requires 'new'") \
- T(ConstructorNotReceiver, "The .constructor property is not an object") \
- T(CurrencyCode, "Currency code is required with currency style.") \
- T(CyclicModuleDependency, "Detected cycle while resolving name '%' in '%'") \
- T(DataViewNotArrayBuffer, \
- "First argument to DataView constructor must be an ArrayBuffer") \
- T(DateType, "this is not a Date object.") \
- T(DebuggerFrame, "Debugger: Invalid frame index.") \
- T(DebuggerType, "Debugger: Parameters have wrong types.") \
- T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
- T(DefineDisallowed, "Cannot define property %, object is not extensible") \
- T(DetachedOperation, "Cannot perform % on a neutered ArrayBuffer") \
- T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
- T(ExtendsValueNotConstructor, \
- "Class extends value % is not a constructor or null") \
- T(FirstArgumentNotRegExp, \
- "First argument to % must not be a regular expression") \
- T(FunctionBind, "Bind must be called on a function") \
- T(GeneratorRunning, "Generator is already running") \
- T(IllegalInvocation, "Illegal invocation") \
- T(ImmutablePrototypeSet, \
- "Immutable prototype object '%' cannot have their prototype set") \
- T(ImportCallNotNewExpression, "Cannot use new with import") \
- T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
- T(ImportMissingSpecifier, "import() requires a specifier") \
- T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
- T(InstanceofNonobjectProto, \
- "Function has non-object prototype '%' in instanceof check") \
- T(InvalidArgument, "invalid_argument") \
- T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
- T(InvalidRegExpExecResult, \
- "RegExp exec method returned something other than an Object or null") \
- T(InvalidUnit, "Invalid unit argument for %() '%'") \
- T(IteratorResultNotAnObject, "Iterator result % is not an object") \
- T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
- T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
- T(LanguageID, "Language ID should be string or object.") \
- T(LocaleNotEmpty, \
- "First argument to Intl.Locale constructor can't be empty or missing") \
- T(LocaleBadParameters, "Incorrect locale information provided") \
- T(ListFormatBadParameters, "Incorrect ListFormat information provided") \
- T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
- T(MethodCalledOnWrongObject, \
- "Method % called on a non-object or on a wrong type of object.") \
- T(MethodInvokedOnNullOrUndefined, \
- "Method invoked on undefined or null value.") \
- T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
- T(NoAccess, "no access") \
- T(NonCallableInInstanceOfCheck, \
- "Right-hand side of 'instanceof' is not callable") \
- T(NonCoercible, "Cannot destructure 'undefined' or 'null'.") \
- T(NonCoercibleWithProperty, \
- "Cannot destructure property `%` of 'undefined' or 'null'.") \
- T(NonExtensibleProto, "% is not extensible") \
- T(NonObjectInInstanceOfCheck, \
- "Right-hand side of 'instanceof' is not an object") \
- T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
- T(NonObjectPropertyStore, "Cannot set property '%' of %") \
- T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
- T(NotAnIterator, "% is not an iterator") \
- T(NotAPromise, "% is not a promise") \
- T(NotConstructor, "% is not a constructor") \
- T(NotDateObject, "this is not a Date object.") \
- T(NotGeneric, "% requires that 'this' be a %") \
- T(NotCallableOrIterable, \
- "% is not a function or its return value is not iterable") \
- T(NotCallableOrAsyncIterable, \
- "% is not a function or its return value is not async iterable") \
- T(NotFiniteNumber, "Value need to be finite number for %()") \
- T(NotIterable, "% is not iterable") \
- T(NotIterableNoSymbolLoad, "% is not iterable (cannot read property %)") \
- T(NotAsyncIterable, "% is not async iterable") \
- T(NotPropertyName, "% is not a valid property name") \
- T(NotTypedArray, "this is not a typed array.") \
- T(NotSuperConstructor, "Super constructor % of % is not a constructor") \
- T(NotSuperConstructorAnonymousClass, \
- "Super constructor % of anonymous class is not a constructor") \
- T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
- T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.") \
- T(ObjectGetterExpectingFunction, \
- "Object.prototype.__defineGetter__: Expecting function") \
- T(ObjectGetterCallable, "Getter must be a function: %") \
- T(ObjectNotExtensible, "Cannot add property %, object is not extensible") \
- T(ObjectSetterExpectingFunction, \
- "Object.prototype.__defineSetter__: Expecting function") \
- T(ObjectSetterCallable, "Setter must be a function: %") \
- T(OrdinaryFunctionCalledAsConstructor, \
- "Function object that's not a constructor was created with new") \
- T(PromiseCyclic, "Chaining cycle detected for promise %") \
- T(PromiseExecutorAlreadyInvoked, \
- "Promise executor has already been invoked with non-undefined arguments") \
- T(PromiseNonCallable, "Promise resolve or reject function is not callable") \
- T(PropertyDescObject, "Property description must be an object: %") \
- T(PropertyNotFunction, \
- "'%' returned for property '%' of object '%' is not a function") \
- T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
- T(PrototypeParentNotAnObject, \
- "Class extends value does not have valid prototype property %") \
- T(ProxyConstructNonObject, \
- "'construct' on proxy: trap returned non-object ('%')") \
- T(ProxyDefinePropertyNonConfigurable, \
- "'defineProperty' on proxy: trap returned truish for defining " \
- "non-configurable property '%' which is either non-existent or " \
- "configurable in the proxy target") \
- T(ProxyDefinePropertyNonConfigurableWritable, \
- "'defineProperty' on proxy: trap returned truish for defining " \
- "non-configurable property '%' which cannot be non-writable, unless " \
- "there exists a corresponding non-configurable, non-writable own " \
- "property of the target object.") \
- T(ProxyDefinePropertyNonExtensible, \
- "'defineProperty' on proxy: trap returned truish for adding property '%' " \
- " to the non-extensible proxy target") \
- T(ProxyDefinePropertyIncompatible, \
- "'defineProperty' on proxy: trap returned truish for adding property '%' " \
- " that is incompatible with the existing property in the proxy target") \
- T(ProxyDeletePropertyNonConfigurable, \
- "'deleteProperty' on proxy: trap returned truish for property '%' which " \
- "is non-configurable in the proxy target") \
- T(ProxyDeletePropertyNonExtensible, \
- "'deleteProperty' on proxy: trap returned truish for property '%' but " \
- "the proxy target is non-extensible") \
- T(ProxyGetNonConfigurableData, \
- "'get' on proxy: property '%' is a read-only and " \
- "non-configurable data property on the proxy target but the proxy " \
- "did not return its actual value (expected '%' but got '%')") \
- T(ProxyGetNonConfigurableAccessor, \
- "'get' on proxy: property '%' is a non-configurable accessor " \
- "property on the proxy target and does not have a getter function, but " \
- "the trap did not return 'undefined' (got '%')") \
- T(ProxyGetOwnPropertyDescriptorIncompatible, \
- "'getOwnPropertyDescriptor' on proxy: trap returned descriptor for " \
- "property '%' that is incompatible with the existing property in the " \
- "proxy target") \
- T(ProxyGetOwnPropertyDescriptorInvalid, \
- "'getOwnPropertyDescriptor' on proxy: trap returned neither object nor " \
- "undefined for property '%'") \
- T(ProxyGetOwnPropertyDescriptorNonConfigurable, \
- "'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
- "for property '%' which is either non-existent or configurable in the " \
- "proxy target") \
- T(ProxyGetOwnPropertyDescriptorNonConfigurableWritable, \
- "'getOwnPropertyDescriptor' on proxy: trap reported non-configurable " \
- "and writable for property '%' which is non-configurable, non-writable " \
- "in the proxy target") \
- T(ProxyGetOwnPropertyDescriptorNonExtensible, \
- "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
- "property '%' which exists in the non-extensible proxy target") \
- T(ProxyGetOwnPropertyDescriptorUndefined, \
- "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
- "property '%' which is non-configurable in the proxy target") \
- T(ProxyGetPrototypeOfInvalid, \
- "'getPrototypeOf' on proxy: trap returned neither object nor null") \
- T(ProxyGetPrototypeOfNonExtensible, \
- "'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
- "trap did not return its actual prototype") \
- T(ProxyHandlerOrTargetRevoked, \
- "Cannot create proxy with a revoked proxy as target or handler") \
- T(ProxyHasNonConfigurable, \
- "'has' on proxy: trap returned falsish for property '%' which exists in " \
- "the proxy target as non-configurable") \
- T(ProxyHasNonExtensible, \
- "'has' on proxy: trap returned falsish for property '%' but the proxy " \
- "target is not extensible") \
- T(ProxyIsExtensibleInconsistent, \
- "'isExtensible' on proxy: trap result does not reflect extensibility of " \
- "proxy target (which is '%')") \
- T(ProxyNonObject, \
- "Cannot create proxy with a non-object as target or handler") \
- T(ProxyOwnKeysMissing, \
- "'ownKeys' on proxy: trap result did not include '%'") \
- T(ProxyOwnKeysNonExtensible, \
- "'ownKeys' on proxy: trap returned extra keys but proxy target is " \
- "non-extensible") \
- T(ProxyOwnKeysDuplicateEntries, \
- "'ownKeys' on proxy: trap returned duplicate entries") \
- T(ProxyPreventExtensionsExtensible, \
- "'preventExtensions' on proxy: trap returned truish but the proxy target " \
- "is extensible") \
- T(ProxyPrivate, "Cannot pass private property name to proxy trap") \
- T(ProxyRevoked, "Cannot perform '%' on a proxy that has been revoked") \
- T(ProxySetFrozenData, \
- "'set' on proxy: trap returned truish for property '%' which exists in " \
- "the proxy target as a non-configurable and non-writable data property " \
- "with a different value") \
- T(ProxySetFrozenAccessor, \
- "'set' on proxy: trap returned truish for property '%' which exists in " \
- "the proxy target as a non-configurable and non-writable accessor " \
- "property without a setter") \
- T(ProxySetPrototypeOfNonExtensible, \
- "'setPrototypeOf' on proxy: trap returned truish for setting a new " \
- "prototype on the non-extensible proxy target") \
- T(ProxyTrapReturnedFalsish, "'%' on proxy: trap returned falsish") \
- T(ProxyTrapReturnedFalsishFor, \
- "'%' on proxy: trap returned falsish for property '%'") \
- T(RedefineDisallowed, "Cannot redefine property: %") \
- T(RedefineExternalArray, \
- "Cannot redefine a property of an object with external array elements") \
- T(ReduceNoInitial, "Reduce of empty array with no initial value") \
- T(RegExpFlags, \
- "Cannot supply flags when constructing one RegExp from another") \
- T(RegExpNonObject, "% getter called on non-object %") \
- T(RegExpNonRegExp, "% getter called on non-RegExp object") \
- T(RelativeDateTimeFormatterBadParameters, \
- "Incorrect RelativeDateTimeFormatter provided") \
- T(ResolverNotAFunction, "Promise resolver % is not a function") \
- T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
- T(SharedArrayBufferTooShort, \
- "Derived SharedArrayBuffer constructor created a buffer which was too " \
- "small") \
- T(SharedArrayBufferSpeciesThis, \
- "SharedArrayBuffer subclass returned this from species constructor") \
- T(StaticPrototype, \
- "Classes may not have a static property named 'prototype'") \
- T(StrictDeleteProperty, "Cannot delete property '%' of %") \
- T(StrictPoisonPill, \
- "'caller', 'callee', and 'arguments' properties may not be accessed on " \
- "strict mode functions or the arguments objects for calls to them") \
- T(StrictReadOnlyProperty, \
- "Cannot assign to read only property '%' of % '%'") \
- T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
- T(SymbolIteratorInvalid, \
- "Result of the Symbol.iterator method is not an object") \
- T(SymbolAsyncIteratorInvalid, \
- "Result of the Symbol.asyncIterator method is not an object") \
- T(SymbolKeyFor, "% is not a symbol") \
- T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
- T(SymbolToString, "Cannot convert a Symbol value to a string") \
- T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.") \
- T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
- T(ValueAndAccessor, \
- "Invalid property descriptor. Cannot both specify accessors and a value " \
- "or writable attribute, %") \
- T(VarRedeclaration, "Identifier '%' has already been declared") \
- T(WrongArgs, "%: Arguments list has wrong type") \
- /* ReferenceError */ \
- T(NotDefined, "% is not defined") \
- T(SuperAlreadyCalled, "Super constructor may only be called once") \
- T(AccessedUninitializedVariable, "Cannot access '%' before initialization") \
- T(UnsupportedSuper, "Unsupported reference to 'super'") \
- /* RangeError */ \
- T(BigIntDivZero, "Division by zero") \
- T(BigIntNegativeExponent, "Exponent must be positive") \
- T(BigIntTooBig, "Maximum BigInt size exceeded") \
- T(DateRange, "Provided date is not in valid range.") \
- T(ExpectedLocation, \
- "Expected letters optionally connected with underscores or hyphens for " \
- "a location, got %") \
- T(InvalidArrayBufferLength, "Invalid array buffer length") \
- T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
- T(Invalid, "Invalid %s : %") \
- T(InvalidArrayLength, "Invalid array length") \
- T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
- T(InvalidCodePoint, "Invalid code point %") \
- T(InvalidCountValue, "Invalid count value") \
- T(InvalidDataViewAccessorOffset, \
- "Offset is outside the bounds of the DataView") \
- T(InvalidDataViewLength, "Invalid DataView length %") \
- T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
- T(InvalidHint, "Invalid hint: %") \
- T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
- T(InvalidLanguageTag, "Invalid language tag: %") \
- T(InvalidWeakMapKey, "Invalid value used as weak map key") \
- T(InvalidWeakSetValue, "Invalid value used in weak set") \
- T(InvalidStringLength, "Invalid string length") \
- T(InvalidTimeValue, "Invalid time value") \
- T(InvalidTimeZone, "Invalid time zone specified: %") \
- T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
- T(InvalidTypedArrayIndex, "Invalid typed array index") \
- T(InvalidTypedArrayLength, "Invalid typed array length: %") \
- T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
- T(LocaleMatcher, "Illegal value for localeMatcher:%") \
- T(NormalizationForm, "The normalization form should be one of %.") \
- T(ParameterOfFunctionOutOfRange, \
- "Paramenter % of function %() is % and out of range") \
- T(ZeroDigitNumericSeparator, \
- "Numeric separator can not be used after leading 0.") \
- T(NumberFormatRange, "% argument must be between 0 and 100") \
- T(TrailingNumericSeparator, \
- "Numeric separators are not allowed at the end of numeric literals") \
- T(ContinuousNumericSeparator, \
- "Only one underscore is allowed as numeric separator") \
- T(PropertyValueOutOfRange, "% value is out of range.") \
- T(StackOverflow, "Maximum call stack size exceeded") \
- T(ToPrecisionFormatRange, \
- "toPrecision() argument must be between 1 and 100") \
- T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
- T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
- T(TypedArraySetSourceTooLarge, "Source is too large") \
- T(ValueOutOfRange, "Value % out of range for % options property %") \
- /* SyntaxError */ \
- T(AmbiguousExport, \
- "The requested module '%' contains conflicting star exports for name '%'") \
- T(BadGetterArity, "Getter must not have any formal parameters.") \
- T(BadSetterArity, "Setter must have exactly one formal parameter.") \
- T(BigIntInvalidString, "Invalid BigInt string") \
- T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
- T(ConstructorIsGenerator, "Class constructor may not be a generator") \
- T(ConstructorIsAsync, "Class constructor may not be an async method") \
- T(ConstructorIsPrivate, "Class constructor may not be a private method") \
- T(DerivedConstructorReturnedNonObject, \
- "Derived constructors may only return object or undefined") \
- T(DuplicateConstructor, "A class may only have one constructor") \
- T(DuplicateExport, "Duplicate export of '%'") \
- T(DuplicateProto, \
- "Duplicate __proto__ fields are not allowed in object literals") \
- T(ForInOfLoopInitializer, \
- "% loop variable declaration may not have an initializer.") \
- T(ForOfLet, "The left-hand side of a for-of loop may not start with 'let'.") \
- T(ForInOfLoopMultiBindings, \
- "Invalid left-hand side in % loop: Must have a single binding.") \
- T(GeneratorInSingleStatementContext, \
- "Generators can only be declared at the top level or inside a block.") \
- T(AsyncFunctionInSingleStatementContext, \
- "Async functions can only be declared at the top level or inside a " \
- "block.") \
- T(IllegalBreak, "Illegal break statement") \
- T(NoIterationStatement, \
- "Illegal continue statement: no surrounding iteration statement") \
- T(IllegalContinue, \
- "Illegal continue statement: '%' does not denote an iteration statement") \
- T(IllegalLanguageModeDirective, \
- "Illegal '%' directive in function with non-simple parameter list") \
- T(IllegalReturn, "Illegal return statement") \
- T(IntrinsicWithSpread, "Intrinsic calls do not support spread arguments") \
- T(InvalidRestBindingPattern, \
- "`...` must be followed by an identifier in declaration contexts") \
- T(InvalidPropertyBindingPattern, "Illegal property in declaration context") \
- T(InvalidRestAssignmentPattern, \
- "`...` must be followed by an assignable reference in assignment " \
- "contexts") \
- T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
- T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters") \
- T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
- T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
- T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
- T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
- T(InvalidLhsInPostfixOp, \
- "Invalid left-hand side expression in postfix operation") \
- T(InvalidLhsInPrefixOp, \
- "Invalid left-hand side expression in prefix operation") \
- T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
- T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
- T(InvalidPrivateFieldResolution, \
- "Private field '%' must be declared in an enclosing class") \
- T(InvalidPrivateFieldRead, \
- "Read of private field % from an object which did not contain the field") \
- T(InvalidPrivateFieldWrite, \
- "Write of private field % to an object which did not contain the field") \
- T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
- T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
- T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
- T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
- T(LabelRedeclaration, "Label '%' has already been declared") \
- T(LabelledFunctionDeclaration, \
- "Labelled function declaration not allowed as the body of a control flow " \
- "structure") \
- T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
- T(MalformedRegExp, "Invalid regular expression: /%/: %") \
- T(MalformedRegExpFlags, "Invalid regular expression flags") \
- T(ModuleExportUndefined, "Export '%' is not defined in module") \
- T(MissingFunctionName, "Function statements require a function name") \
- T(HtmlCommentInModule, "HTML comments are not allowed in modules") \
- T(MultipleDefaultsInSwitch, \
- "More than one default clause in switch statement") \
- T(NewlineAfterThrow, "Illegal newline after throw") \
- T(NoCatchOrFinally, "Missing catch or finally after try") \
- T(ParamAfterRest, "Rest parameter must be last formal parameter") \
- T(FlattenPastSafeLength, \
- "Flattening % elements on an array-like of length % " \
- "is disallowed, as the total surpasses 2**53-1") \
- T(PushPastSafeLength, \
- "Pushing % elements on an array-like of length % " \
- "is disallowed, as the total surpasses 2**53-1") \
- T(ElementAfterRest, "Rest element must be last element") \
- T(BadSetterRestParameter, \
- "Setter function argument must not be a rest parameter") \
- T(ParamDupe, "Duplicate parameter name not allowed in this context") \
- T(ParenthesisInArgString, "Function arg string contains parenthesis") \
- T(ArgStringTerminatesParametersEarly, \
- "Arg string terminates parameters early") \
- T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
- T(RestDefaultInitializer, \
- "Rest parameter may not have a default initializer") \
- T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
- T(SuperNotCalled, \
- "Must call super constructor in derived class before accessing 'this' or " \
- "returning from derived constructor") \
- T(SingleFunctionLiteral, "Single function literal required") \
- T(SloppyFunction, \
- "In non-strict mode code, functions can only be declared at top level, " \
- "inside a block, or as the body of an if statement.") \
- T(SpeciesNotConstructor, \
- "object.constructor[Symbol.species] is not a constructor") \
- T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
- T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
- T(StrictFunction, \
- "In strict mode code, functions can only be declared at top level or " \
- "inside a block.") \
- T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
- T(StrictDecimalWithLeadingZero, \
- "Decimals with leading zeros are not allowed in strict mode.") \
- T(StrictOctalEscape, \
- "Octal escape sequences are not allowed in strict mode.") \
- T(StrictWith, "Strict mode code may not include a with statement") \
- T(TemplateOctalLiteral, \
- "Octal escape sequences are not allowed in template strings.") \
- T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
- T(AwaitBindingIdentifier, \
- "'await' is not a valid identifier name in an async function") \
- T(AwaitExpressionFormalParameter, \
- "Illegal await-expression in formal parameters of async function") \
- T(TooManyArguments, \
- "Too many arguments in function call (only 65535 allowed)") \
- T(TooManyParameters, \
- "Too many parameters in function definition (only 65534 allowed)") \
- T(TooManySpreads, \
- "Literal containing too many nested spreads (up to 65534 allowed)") \
- T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
- T(TooManyElementsInPromiseAll, "Too many elements passed to Promise.all") \
- T(TypedArrayTooShort, \
- "Derived TypedArray constructor created an array which was too small") \
- T(UnexpectedEOS, "Unexpected end of input") \
- T(UnexpectedPrivateField, "Unexpected private field") \
- T(UnexpectedReserved, "Unexpected reserved word") \
- T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
- T(UnexpectedSuper, "'super' keyword unexpected here") \
- T(UnexpectedNewTarget, "new.target expression is not allowed here") \
- T(UnexpectedTemplateString, "Unexpected template string") \
- T(UnexpectedToken, "Unexpected token %") \
- T(UnexpectedTokenUnaryExponentiation, \
- "Unary operator used immediately before exponentiation expression. " \
- "Parenthesis must be used to disambiguate operator precedence") \
- T(UnexpectedTokenIdentifier, "Unexpected identifier") \
- T(UnexpectedTokenNumber, "Unexpected number") \
- T(UnexpectedTokenString, "Unexpected string") \
- T(UnexpectedTokenRegExp, "Unexpected regular expression") \
- T(UnexpectedLexicalDeclaration, \
- "Lexical declaration cannot appear in a single-statement context") \
- T(UnknownLabel, "Undefined label '%'") \
- T(UnresolvableExport, \
- "The requested module '%' does not provide an export named '%'") \
- T(UnterminatedArgList, "missing ) after argument list") \
- T(UnterminatedRegExp, "Invalid regular expression: missing /") \
- T(UnterminatedTemplate, "Unterminated template literal") \
- T(UnterminatedTemplateExpr, "Missing } in template expression") \
- T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance") \
- T(InvalidHexEscapeSequence, "Invalid hexadecimal escape sequence") \
- T(InvalidUnicodeEscapeSequence, "Invalid Unicode escape sequence") \
- T(UndefinedUnicodeCodePoint, "Undefined Unicode code-point") \
- T(YieldInParameter, "Yield expression not allowed in formal parameter") \
- /* EvalError */ \
- T(CodeGenFromStrings, "%") \
- T(NoSideEffectDebugEvaluate, "Possible side-effect in debug-evaluate") \
- /* URIError */ \
- T(URIMalformed, "URI malformed") \
- /* Wasm errors (currently Error) */ \
- T(WasmTrapUnreachable, "unreachable") \
- T(WasmTrapMemOutOfBounds, "memory access out of bounds") \
- T(WasmTrapUnalignedAccess, "operation does not support unaligned accesses") \
- T(WasmTrapDivByZero, "divide by zero") \
- T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
- T(WasmTrapRemByZero, "remainder by zero") \
- T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
- T(WasmTrapFuncInvalid, "invalid index into function table") \
- T(WasmTrapFuncSigMismatch, "function signature mismatch") \
- T(WasmTrapTypeError, "wasm function signature contains illegal type") \
- T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
- T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
- T(WasmTrapTableOutOfBounds, "table access out of bounds") \
- T(WasmExceptionError, "wasm exception") \
- /* Asm.js validation related */ \
- T(AsmJsInvalid, "Invalid asm.js: %") \
- T(AsmJsCompiled, "Converted asm.js to WebAssembly: %") \
- T(AsmJsInstantiated, "Instantiated asm.js: %") \
- T(AsmJsLinkingFailed, "Linking failure in asm.js: %") \
- /* DataCloneError messages */ \
- T(DataCloneError, "% could not be cloned.") \
- T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
- T(DataCloneErrorDetachedArrayBuffer, \
- "An ArrayBuffer is neutered and could not be cloned.") \
- T(DataCloneErrorSharedArrayBufferTransferred, \
- "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
- "transferred.") \
- T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
- T(DataCloneDeserializationVersionError, \
- "Unable to deserialize cloned data due to invalid or unsupported " \
- "version.") \
- /* Builtins-Trace Errors */ \
- T(TraceEventCategoryError, "Trace event category must be a string.") \
- T(TraceEventNameError, "Trace event name must be a string.") \
- T(TraceEventNameLengthError, \
- "Trace event name must not be an empty string.") \
- T(TraceEventPhaseError, "Trace event phase must be a number.") \
- T(TraceEventIDError, "Trace event id must be a number.") \
- /* Weak refs */ \
- T(WeakRefsCleanupMustBeCallable, \
- "FinalizationGroup: cleanup must be callable") \
- T(WeakRefsRegisterTargetMustBeObject, \
- "FinalizationGroup.prototype.register: target must be an object") \
- T(WeakRefsRegisterTargetAndHoldingsMustNotBeSame, \
- "FinalizationGroup.prototype.register: target and holdings must not be " \
- "same") \
- T(WeakRefsWeakRefConstructorTargetMustBeObject, \
- "WeakRef: target must be an object")
-
-enum class MessageTemplate {
-#define TEMPLATE(NAME, STRING) k##NAME,
- MESSAGE_TEMPLATES(TEMPLATE)
-#undef TEMPLATE
- kLastMessage
-};
-
-inline MessageTemplate MessageTemplateFromInt(int message_id) {
- DCHECK_LE(0, message_id);
- DCHECK_LT(message_id, static_cast<int>(MessageTemplate::kLastMessage));
- return static_cast<MessageTemplate>(message_id);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_
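The header deleted above builds the MessageTemplate enum from an X-macro list. For readers unfamiliar with the pattern, a self-contained sketch using a tiny stand-in list (two entries instead of several hundred; the string-table expansion shown second is one common way to consume such a list, not necessarily what V8 does):

    // Demo list in the same shape as MESSAGE_TEMPLATES above.
    #define DEMO_TEMPLATES(T)   \
      T(None, "")               \
      T(CyclicProto, "Cyclic __proto__ value")

    // Expansion 1: an enum, exactly as the deleted header does it.
    enum class DemoTemplate {
    #define TEMPLATE(NAME, STRING) k##NAME,
      DEMO_TEMPLATES(TEMPLATE)
    #undef TEMPLATE
      kLastMessage
    };

    // Expansion 2: a parallel string table generated from the same list.
    #define TEMPLATE(NAME, STRING) STRING,
    constexpr const char* kDemoStrings[] = {DEMO_TEMPLATES(TEMPLATE)};
    #undef TEMPLATE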
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index c76f546d62..d216d3bc39 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -7,13 +7,16 @@
#include <memory>
#include "src/api/api-inl.h"
+#include "src/base/v8-fallthrough.h"
#include "src/execution/execution.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/strings/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
@@ -303,7 +306,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
Handle<Object> StackFrameBase::GetEvalOrigin() {
- if (!HasScript()) return isolate_->factory()->undefined_value();
+ if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value();
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
@@ -321,12 +324,6 @@ bool StackFrameBase::IsEval() {
GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
}
-MaybeHandle<String> StackFrameBase::ToString() {
- IncrementalStringBuilder builder(isolate_);
- ToString(builder);
- return builder.Finish();
-}
-
void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
DCHECK(!array->IsWasmFrame(frame_ix));
@@ -365,7 +362,7 @@ Handle<Object> JSStackFrame::GetFileName() {
}
Handle<Object> JSStackFrame::GetFunctionName() {
- Handle<String> result = JSFunction::GetName(function_);
+ Handle<String> result = JSFunction::GetDebugName(function_);
if (result->length() != 0) return result;
if (HasScript() &&
@@ -514,177 +511,6 @@ bool JSStackFrame::IsToplevel() {
return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
}
-namespace {
-
-bool IsNonEmptyString(Handle<Object> object) {
- return (object->IsString() && String::cast(*object).length() > 0);
-}
-
-void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
- IncrementalStringBuilder* builder) {
- if (call_site->IsNative()) {
- builder->AppendCString("native");
- return;
- }
-
- Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
- if (!file_name->IsString() && call_site->IsEval()) {
- Handle<Object> eval_origin = call_site->GetEvalOrigin();
- DCHECK(eval_origin->IsString());
- builder->AppendString(Handle<String>::cast(eval_origin));
- builder->AppendCString(", "); // Expecting source position to follow.
- }
-
- if (IsNonEmptyString(file_name)) {
- builder->AppendString(Handle<String>::cast(file_name));
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- builder->AppendCString("<anonymous>");
- }
-
- int line_number = call_site->GetLineNumber();
- if (line_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> line_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(line_number), isolate), isolate);
- builder->AppendString(line_string);
-
- int column_number = call_site->GetColumnNumber();
- if (column_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> column_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(column_number), isolate), isolate);
- builder->AppendString(column_string);
- }
- }
-}
-
-int StringIndexOf(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (pattern->length() > subject->length()) return -1;
- return String::IndexOf(isolate, subject, pattern, 0);
-}
-
-// Returns true iff
-// 1. the subject ends with '.' + pattern, or
-// 2. subject == pattern.
-bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (String::Equals(isolate, subject, pattern)) return true;
-
- FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
- FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
-
- int pattern_index = pattern_reader.length() - 1;
- int subject_index = subject_reader.length() - 1;
- for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
- if (subject_index < 0) {
- return false;
- }
-
- const uc32 subject_char = subject_reader.Get(subject_index);
- if (i == pattern_reader.length()) {
- if (subject_char != '.') return false;
- } else if (subject_char != pattern_reader.Get(pattern_index)) {
- return false;
- }
-
- pattern_index--;
- subject_index--;
- }
-
- return true;
-}
-
-void AppendMethodCall(Isolate* isolate, JSStackFrame* call_site,
- IncrementalStringBuilder* builder) {
- Handle<Object> type_name = call_site->GetTypeName();
- Handle<Object> method_name = call_site->GetMethodName();
- Handle<Object> function_name = call_site->GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- Handle<String> function_string = Handle<String>::cast(function_name);
- if (IsNonEmptyString(type_name)) {
- Handle<String> type_string = Handle<String>::cast(type_name);
- bool starts_with_type_name =
- (StringIndexOf(isolate, function_string, type_string) == 0);
- if (!starts_with_type_name) {
- builder->AppendString(type_string);
- builder->AppendCharacter('.');
- }
- }
- builder->AppendString(function_string);
-
- if (IsNonEmptyString(method_name)) {
- Handle<String> method_string = Handle<String>::cast(method_name);
- if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
- builder->AppendCString(" [as ");
- builder->AppendString(method_string);
- builder->AppendCharacter(']');
- }
- }
- } else {
- if (IsNonEmptyString(type_name)) {
- builder->AppendString(Handle<String>::cast(type_name));
- builder->AppendCharacter('.');
- }
- if (IsNonEmptyString(method_name)) {
- builder->AppendString(Handle<String>::cast(method_name));
- } else {
- builder->AppendCString("<anonymous>");
- }
- }
-}
-
-} // namespace
-
-void JSStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<Object> function_name = GetFunctionName();
-
- const bool is_toplevel = IsToplevel();
- const bool is_async = IsAsync();
- const bool is_promise_all = IsPromiseAll();
- const bool is_constructor = IsConstructor();
- const bool is_method_call = !(is_toplevel || is_constructor);
-
- if (is_async) {
- builder.AppendCString("async ");
- }
- if (is_promise_all) {
- // For `Promise.all(iterable)` frames we interpret the {offset_}
- // as the element index into `iterable` where the error occurred.
- builder.AppendCString("Promise.all (index ");
- Handle<String> index_string = isolate_->factory()->NumberToString(
- handle(Smi::FromInt(offset_), isolate_), isolate_);
- builder.AppendString(index_string);
- builder.AppendCString(")");
- return;
- }
- if (is_method_call) {
- AppendMethodCall(isolate_, this, &builder);
- } else if (is_constructor) {
- builder.AppendCString("new ");
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- builder.AppendCString("<anonymous>");
- }
- } else if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- AppendFileLocation(isolate_, this, &builder);
- return;
- }
-
- builder.AppendCString(" (");
- AppendFileLocation(isolate_, this, &builder);
- builder.AppendCString(")");
-
- return;
-}
-
int JSStackFrame::GetPosition() const {
Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
@@ -749,41 +575,6 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
return module_name;
}
-void WasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
- isolate_);
- MaybeHandle<String> module_name =
- WasmModuleObject::GetModuleNameOrNull(isolate_, module_object);
- MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
- isolate_, module_object, wasm_func_index_);
- bool has_name = !module_name.is_null() || !function_name.is_null();
- if (has_name) {
- if (module_name.is_null()) {
- builder.AppendString(function_name.ToHandleChecked());
- } else {
- builder.AppendString(module_name.ToHandleChecked());
- if (!function_name.is_null()) {
- builder.AppendCString(".");
- builder.AppendString(function_name.ToHandleChecked());
- }
- }
- builder.AppendCString(" (");
- }
-
- builder.AppendCString("wasm-function[");
-
- char buffer[16];
- SNPrintF(ArrayVector(buffer), "%u]", wasm_func_index_);
- builder.AppendCString(buffer);
-
- SNPrintF(ArrayVector(buffer), ":%d", GetPosition());
- builder.AppendCString(buffer);
-
- if (has_name) builder.AppendCString(")");
-
- return;
-}
-
int WasmStackFrame::GetPosition() const {
return IsInterpreted()
? offset_
@@ -791,6 +582,14 @@ int WasmStackFrame::GetPosition() const {
code_, offset_);
}
+int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
+
+int WasmStackFrame::GetModuleOffset() const {
+ const int function_offset =
+ wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_);
+ return function_offset + GetPosition();
+}
+
Handle<Object> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
@@ -858,24 +657,6 @@ int AsmJsWasmStackFrame::GetColumnNumber() {
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
-void AsmJsWasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- // The string should look exactly as the respective javascript frame string.
- // Keep this method in line to
- // JSStackFrame::ToString(IncrementalStringBuilder&).
- Handle<Object> function_name = GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- builder.AppendCString(" (");
- }
-
- AppendFileLocation(isolate_, this, &builder);
-
- if (IsNonEmptyString(function_name)) builder.AppendCString(")");
-
- return;
-}
-
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
@@ -914,8 +695,7 @@ StackFrameBase* FrameArrayIterator::Frame() {
namespace {
MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
- Handle<FrameArray> frame_array,
- int frame_index) {
+ Handle<StackTraceFrame> frame) {
Handle<JSFunction> target =
handle(isolate->native_context()->callsite_function(), isolate);
@@ -924,6 +704,14 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
isolate, obj,
JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
+ // TODO(szuend): Introduce a new symbol "call_site_frame_symbol" and set
+ // it to the StackTraceFrame. The CallSite API builtins can then
+ // be implemented using StackFrameInfo objects.
+
+ Handle<FrameArray> frame_array(FrameArray::cast(frame->frame_array()),
+ isolate);
+ int frame_index = frame->frame_index();
+
Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
RETURN_ON_EXCEPTION(isolate,
JSObject::SetOwnPropertyIgnoreAttributes(
@@ -943,14 +731,16 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
// a JSArray of JSCallSite objects.
MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
- Handle<FrameArray> elems) {
- const int frame_count = elems->FrameCount();
+ Handle<FixedArray> elems) {
+ const int frame_count = elems->length();
Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; i++) {
Handle<Object> site;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, site,
- ConstructCallSite(isolate, elems, i), JSArray);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, site, ConstructCallSite(isolate, frame),
+ JSArray);
frames->set(i, *site);
}
@@ -1013,13 +803,14 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
DCHECK(raw_stack->IsFixedArray());
- Handle<FrameArray> elems = Handle<FrameArray>::cast(raw_stack);
+ Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
if (!in_recursion) {
+ Handle<Context> error_context = error->GetCreationContext();
+ DCHECK(error_context->IsNativeContext());
+
if (isolate->HasPrepareStackTraceCallback()) {
- Handle<Context> error_context = error->GetCreationContext();
- DCHECK(!error_context.is_null() && error_context->IsNativeContext());
PrepareStackTraceScope scope(isolate);
Handle<JSArray> sites;
@@ -1033,7 +824,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Object);
return result;
} else {
- Handle<JSFunction> global_error = isolate->error_function();
+ Handle<JSFunction> global_error =
+ handle(error_context->error_function(), isolate);
// If there's a user-specified "prepareStackTrace" function, call it on
// the frames and use its result.
@@ -1080,11 +872,13 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
wasm::WasmCodeRefScope wasm_code_ref_scope;
- for (FrameArrayIterator it(isolate, elems); it.HasFrame(); it.Advance()) {
+ for (int i = 0; i < elems->length(); ++i) {
builder.AppendCString("\n at ");
- StackFrameBase* frame = it.Frame();
- frame->ToString(builder);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ SerializeStackTraceFrame(isolate, frame, builder);
+
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
// stringified already regardless. Still, try to append a string
@@ -1140,7 +934,7 @@ const char* MessageFormatter::TemplateString(MessageTemplate index) {
return STRING;
MESSAGE_TEMPLATES(CASE)
#undef CASE
- case MessageTemplate::kLastMessage:
+ case MessageTemplate::kMessageCount:
default:
return nullptr;
}
@@ -1183,7 +977,7 @@ MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
MaybeHandle<Object> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace) {
+ StackTraceCollection stack_trace_collection) {
// 1. If NewTarget is undefined, let newTarget be the active function object,
// else let newTarget be NewTarget.
@@ -1217,17 +1011,19 @@ MaybeHandle<Object> ErrorUtils::Construct(
Object);
}
- // Optionally capture a more detailed stack trace for the message.
- if (!suppress_detailed_trace) {
- RETURN_ON_EXCEPTION(isolate, isolate->CaptureAndSetDetailedStackTrace(err),
- Object);
+ switch (stack_trace_collection) {
+ case StackTraceCollection::kDetailed:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetDetailedStackTrace(err), Object);
+ V8_FALLTHROUGH;
+ case StackTraceCollection::kSimple:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
+ Object);
+ break;
+ case StackTraceCollection::kNone:
+ break;
}
-
- // Capture a simple stack trace for the stack property.
- RETURN_ON_EXCEPTION(isolate,
- isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
- Object);
-
return err;
}
@@ -1356,7 +1152,7 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
- no_caller, false);
+ no_caller, StackTraceCollection::kDetailed);
}
} // namespace internal
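
The new `WasmStackFrame::GetColumnNumber`/`GetModuleOffset` pair above makes a wasm frame's "column" the byte offset within the module rather than `kNone`. A minimal sketch of that arithmetic, assuming only the `GetFunctionOffset` and position values visible in the hunk (the free-standing helper below is illustrative, not part of the patch):

```cpp
// Illustrative sketch: a wasm frame's column is the module-relative byte
// offset, i.e. the function's start offset in the module plus the code
// position inside that function.
int WasmFrameColumn(int function_offset_in_module, int position_in_function) {
  // e.g. a trap at position 7 of a function starting at byte 132 -> column 139
  return function_offset_in_module + position_in_function;
}
```
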
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 0fc3692f64..23f32c2fe1 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,7 +12,7 @@
#include <memory>
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -24,7 +24,6 @@ class WasmCode;
// Forward declarations.
class AbstractCode;
class FrameArray;
-class IncrementalStringBuilder;
class JSMessageObject;
class LookupIterator;
class SharedFunctionInfo;
@@ -94,9 +93,6 @@ class StackFrameBase {
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
- MaybeHandle<String> ToString();
- virtual void ToString(IncrementalStringBuilder& builder) = 0;
-
// Used to signal that the requested field is unknown.
static const int kNone = -1;
@@ -139,8 +135,6 @@ class JSStackFrame : public StackFrameBase {
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
JSStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -177,7 +171,7 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
- int GetColumnNumber() override { return kNone; }
+ int GetColumnNumber() override;
int GetPromiseIndex() const override { return kNone; }
@@ -189,8 +183,6 @@ class WasmStackFrame : public StackFrameBase {
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
- void ToString(IncrementalStringBuilder& builder) override;
-
protected:
Handle<Object> Null() const;
@@ -203,6 +195,8 @@ class WasmStackFrame : public StackFrameBase {
int offset_;
private:
+ int GetModuleOffset() const;
+
WasmStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -224,8 +218,6 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetLineNumber() override;
int GetColumnNumber() override;
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
friend class FrameArrayIterator;
AsmJsWasmStackFrame() = default;
@@ -267,10 +259,13 @@ enum FrameSkipMode {
class ErrorUtils : public AllStatic {
public:
+ // |kNone| is useful when you don't need the stack information at all, for
+ // example when creating a deserialized error.
+ enum class StackTraceCollection { kDetailed, kSimple, kNone };
static MaybeHandle<Object> Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace);
+ StackTraceCollection stack_trace_collection);
static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
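
With the boolean `suppress_detailed_trace` replaced by `StackTraceCollection`, callers state explicitly which traces to capture; `kDetailed` also captures the simple trace via the `V8_FALLTHROUGH` in `ErrorUtils::Construct`, and `kNone` skips capture entirely (the deserialized-error case mentioned in the comment). A hedged call-site sketch — the `FrameSkipMode` value and surrounding handles are illustrative; only `Construct` and `StackTraceCollection` come from this patch:

```cpp
// Sketch of a caller that wants an Error object without any stack capture.
Handle<Object> no_caller;
MaybeHandle<Object> err = ErrorUtils::Construct(
    isolate, error_constructor, error_constructor, message, SKIP_NONE,
    no_caller, ErrorUtils::StackTraceCollection::kNone);
```
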
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 8088935154..3cc95205fa 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -253,7 +253,7 @@ void MicrotaskQueue::OnCompleted(Isolate* isolate) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- isolate->heap()->ClearKeepDuringJobSet();
+ isolate->heap()->ClearKeptObjects();
FireMicrotasksCompletedCallback(isolate);
}
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.cc b/deps/v8/src/execution/mips/frame-constants-mips.cc
index 95d6eb951c..4c930e71a9 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.cc
+++ b/deps/v8/src/execution/mips/frame-constants-mips.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen/assembler.h"
+#include "src/execution/mips/frame-constants-mips.h"
+
#include "src/codegen/mips/assembler-mips-inl.h"
-#include "src/codegen/mips/assembler-mips.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/mips/frame-constants-mips.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index e0448f232a..6a3a160ec3 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -1356,8 +1356,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
return ret;
}
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1373,32 +1373,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1414,33 +1414,33 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1456,32 +1456,32 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1;
+ *rounded_int -= 1;
+ *rounded -= 1;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1497,32 +1497,32 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1538,26 +1538,26 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
@@ -2512,18 +2512,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2533,7 +2533,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2543,7 +2543,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2553,7 +2553,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2568,7 +2568,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2822,7 +2822,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -2876,7 +2876,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
int64_t result;
double rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3489,7 +3489,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
int64_t result;
float rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3502,7 +3502,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -5271,128 +5271,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5414,7 +5414,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5432,16 +5432,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5449,23 +5449,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5588,19 +5588,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5623,13 +5623,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -5859,7 +5859,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -5878,37 +5878,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -5932,11 +5932,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -5944,49 +5944,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -5995,19 +5995,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6020,11 +6020,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6032,22 +6032,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6157,12 +6157,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
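
The `kRoundToNearest` branches rewritten above all share the same tie-breaking trick; a standalone sketch of just that logic, with the FCSR plumbing omitted (the helper name is illustrative):

```cpp
#include <cmath>
#include <cstdint>

// floor(x + 0.5) rounds exact .5 ties upward, so when that lands on an odd
// integer and the input really was halfway, both results are stepped back by
// one to hit the even neighbour (round-half-to-even).
double RoundTiesToEven(double fs, int32_t* rounded_int) {
  double rounded = std::floor(fs + 0.5);
  *rounded_int = static_cast<int32_t>(rounded);
  if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
    *rounded_int -= 1;
    rounded -= 1.0;
  }
  return rounded;  // e.g. fs == 2.5 -> 2, fs == 3.5 -> 4
}
```
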
diff --git a/deps/v8/src/execution/mips/simulator-mips.h b/deps/v8/src/execution/mips/simulator-mips.h
index b5712d1a82..28e38fd0a5 100644
--- a/deps/v8/src/execution/mips/simulator-mips.h
+++ b/deps/v8/src/execution/mips/simulator-mips.h
@@ -258,16 +258,16 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
template <typename Tfp, typename Tint>
- void round_according_to_msacsr(Tfp toRound, Tfp& rounded, Tint& rounded_int);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_msacsr(Tfp toRound, Tfp* rounded, Tint* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
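
The header now declares the rounding helpers with pointer output parameters instead of non-const references, so writes are explicit at every call site. A sketch of the resulting call-site shape, mirroring the `CVT_W_D` handling updated in simulator-mips.cc above (variable names are illustrative):

```cpp
// Outputs are passed by address; the caller owns the storage.
double rounded;
int32_t rounded_int;
round_according_to_fcsr(fs, &rounded, &rounded_int, fs);
if (set_fcsr_round_error(fs, rounded)) {
  set_fpu_register_word_invalid_result(fs, rounded);
}
```
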
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index 68398605ba..97ef183592 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -4,10 +4,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
-#include "src/codegen/mips64/assembler-mips64.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
#include "src/execution/mips64/frame-constants-mips64.h"
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 7c45e7f82d..3fbf1961a8 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -1285,8 +1285,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
}
// For cvt instructions only
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1302,32 +1302,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1343,33 +1343,33 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
// for cvt instructions only
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1385,32 +1385,32 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1426,33 +1426,33 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1468,26 +1468,26 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
@@ -2507,18 +2507,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2528,7 +2528,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2538,7 +2538,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2548,7 +2548,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2563,7 +2563,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2829,7 +2829,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_L_S: {
float rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -2839,7 +2839,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3189,7 +3189,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3243,7 +3243,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
double rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -5544,128 +5544,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5687,7 +5687,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5705,16 +5705,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5722,23 +5722,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5861,19 +5861,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5896,13 +5896,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -6139,7 +6139,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -6158,37 +6158,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -6212,11 +6212,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -6224,49 +6224,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -6275,19 +6275,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6300,11 +6300,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6312,22 +6312,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6437,12 +6437,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index d1251f5f0e..2bfcbe9d98 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -255,17 +255,17 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
template <typename T_fp, typename T_int>
- void round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int);
+ void round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int);
void set_fcsr_rounding_mode(FPURoundingMode mode);
void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
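
The mips and mips64 simulator changes above (round_according_to_fcsr, FPUProcessNaNsAndZeros, Msa3RFInstrHelper, Msa2RFInstrHelper and friends) all apply the same mechanical refactor: non-const reference out-parameters become pointer out-parameters, so every write is spelled *param in the callee and every call site passes an explicit address. A minimal standalone sketch of the pattern follows; RoundDown is a hypothetical helper for illustration, not a V8 function.

#include <cmath>
#include <cstdint>

// Hypothetical helper showing the refactor: out-parameters that used to be
// non-const references (double&, int32_t&) are now pointers, so mutation is
// explicit both in the signature and at the call site.
static void RoundDown(double to_round, double* rounded, int32_t* rounded_int) {
  *rounded = std::floor(to_round);
  *rounded_int = static_cast<int32_t>(*rounded);
}

int main() {
  double rounded;
  int32_t rounded_int;
  RoundDown(2.75, &rounded, &rounded_int);  // callers pass addresses explicitly
  return rounded_int == 2 ? 0 : 1;
}

Pointer out-parameters make the mutation visible at the call site, which is presumably why the whole family of helpers was converted in one pass.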
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 6cd4daa33c..96308f7f5b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -342,7 +342,7 @@ void PPCDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 8093497168..8a82e32243 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -372,7 +372,7 @@ void S390Debugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -5149,27 +5149,6 @@ EVALUATE(STM) {
return length;
}
-EVALUATE(TM) {
- DCHECK_OPCODE(TM);
- // Test Under Mask (Mem - Imm) (8)
- DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
- int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t addr = b1_val + d1_val;
- uint8_t mem_val = ReadB(addr);
- uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
- return length;
-}
-
EVALUATE(MVI) {
UNIMPLEMENTED();
USE(instr);
@@ -5595,7 +5574,8 @@ EVALUATE(LLILL) {
return 0;
}
-inline static int TestUnderMask(uint16_t val, uint16_t mask) {
+inline static int TestUnderMask(uint16_t val, uint16_t mask,
+ bool is_tm_or_tmy) {
// Test if all selected bits are zeros or mask is zero
if (0 == (mask & val)) {
return 0x8;
@@ -5607,6 +5587,13 @@ inline static int TestUnderMask(uint16_t val, uint16_t mask) {
}
// Now we know selected bits mixed zeros and ones
+  // Test if it is TM or TMY, since they produce a
+  // different CC result from TMLL/TMLH/TMHH/TMHL.
+ if (is_tm_or_tmy) {
+ return 0x4;
+ }
+
+ // Now we know the instruction is TMLL/TMLH/TMHH/TMHL
// Test if the leftmost bit is zero or one
#if defined(__GNUC__)
int leadingZeros = __builtin_clz(mask);
@@ -5639,7 +5626,8 @@ EVALUATE(TMLH) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) >> 16;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+  bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
@@ -5648,20 +5636,29 @@ EVALUATE(TMLL) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) & 0x0000FFFF;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+  bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
EVALUATE(TMHH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) >> 16;
+ uint32_t mask = i2 & 0x0000FFFF;
+  bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(TMHL) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) & 0x0000FFFF;
+ uint32_t mask = i2 & 0x0000FFFF;
+  bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(BRAS) {
@@ -9972,26 +9969,31 @@ EVALUATE(ECAG) {
return 0;
}
+EVALUATE(TM) {
+ DCHECK_OPCODE(TM);
+ // Test Under Mask (Mem - Imm) (8)
+ DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t selected_bits = mem_val & imm_val;
+ // is TM
+  bool is_tm_or_tmy = true;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
+ return length;
+}
+
EVALUATE(TMY) {
DCHECK_OPCODE(TMY);
// Test Under Mask (Mem - Imm) (8)
- DECODE_SIY_INSTRUCTION(b1, d1, i2);
+ DECODE_SIY_INSTRUCTION(b1, d1_val, imm_val);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t d1_val = d1;
intptr_t addr = b1_val + d1_val;
uint8_t mem_val = ReadB(addr);
- uint8_t imm_val = i2;
uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
+ // is TMY
+  bool is_tm_or_tmy = true;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
return length;
}
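
The reworked s390 test-under-mask handling funnels TM, TMY and the TMLL/TMLH/TMHL/TMHH family through a single TestUnderMask helper, with the new is_tm_or_tmy flag selecting the CC1 result for the mixed-bits case. A condensed standalone sketch of that condition-code selection is below; it assumes the simulator's one-hot CC encoding (0x8 = CC0, 0x4 = CC1, 0x2 = CC2, 0x1 = CC3), matching the constants used in the removed TM implementation.

#include <cstdint>

// Condition codes are kept as a one-hot nibble in condition_reg_
// (assumed encoding: 0x8 = CC0, 0x4 = CC1, 0x2 = CC2, 0x1 = CC3).
static int TestUnderMaskSketch(uint16_t val, uint16_t mask, bool is_tm_or_tmy) {
  if ((val & mask) == 0) return 0x8;     // CC0: all selected bits are zero
  if ((val & mask) == mask) return 0x1;  // CC3: all selected bits are one
  if (is_tm_or_tmy) return 0x4;          // TM/TMY: any mixed result is CC1
  // TMLL/TMLH/TMHL/TMHH additionally distinguish CC1 from CC2 by the value of
  // the leftmost selected bit (the __builtin_clz logic in TestUnderMask).
  return 0x4;
}

int main() {
  // 0xF0 under mask 0xFF has mixed zeros and ones, so TM/TMY report CC1.
  return TestUnderMaskSketch(0xF0, 0xFF, /*is_tm_or_tmy=*/true) == 0x4 ? 0 : 1;
}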
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
new file mode 100644
index 0000000000..e5c24cef1e
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -0,0 +1,345 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/stack-guard.h"
+
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/execution/interrupts-scope.h"
+#include "src/execution/isolate.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/logging/counters.h"
+#include "src/roots/roots-inl.h"
+#include "src/utils/memcopy.h"
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+
+void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(kInterruptLimit);
+ thread_local_.set_climit(kInterruptLimit);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::reset_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(thread_local_.real_jslimit_);
+ thread_local_.set_climit(thread_local_.real_climit_);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::SetStackLimit(uintptr_t limit) {
+ ExecutionAccess access(isolate_);
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ }
+ if (thread_local_.climit() == thread_local_.real_climit_) {
+ thread_local_.set_climit(limit);
+ }
+ thread_local_.real_climit_ = limit;
+ thread_local_.real_jslimit_ = jslimit;
+}
+
+void StackGuard::AdjustStackLimitForSimulator() {
+ ExecutionAccess access(isolate_);
+ uintptr_t climit = thread_local_.real_climit_;
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+void StackGuard::EnableInterrupts() {
+ ExecutionAccess access(isolate_);
+ if (has_pending_interrupts(access)) {
+ set_interrupt_limits(access);
+ }
+}
+
+void StackGuard::DisableInterrupts() {
+ ExecutionAccess access(isolate_);
+ reset_limits(access);
+}
+
+void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
+ ExecutionAccess access(isolate_);
+ DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
+ if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Intercept already requested interrupts.
+ int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+ scope->intercepted_flags_ = intercepted;
+ thread_local_.interrupt_flags_ &= ~intercepted;
+ } else {
+ DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
+ // Restore postponed interrupts.
+ int restored_flags = 0;
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
+ current->intercepted_flags_ &= ~scope->intercept_mask_;
+ }
+ thread_local_.interrupt_flags_ |= restored_flags;
+ }
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ // Add scope to the chain.
+ scope->prev_ = thread_local_.interrupt_scopes_;
+ thread_local_.interrupt_scopes_ = scope;
+}
+
+void StackGuard::PopInterruptsScope() {
+ ExecutionAccess access(isolate_);
+ InterruptsScope* top = thread_local_.interrupt_scopes_;
+ DCHECK_NE(top->mode_, InterruptsScope::kNoop);
+ if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Make intercepted interrupts active.
+ DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
+ thread_local_.interrupt_flags_ |= top->intercepted_flags_;
+ } else {
+ DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
+    // Postpone existing interrupts if needed.
+ if (top->prev_) {
+ for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
+ interrupt = interrupt << 1) {
+ InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
+ if ((thread_local_.interrupt_flags_ & flag) &&
+ top->prev_->Intercept(flag)) {
+ thread_local_.interrupt_flags_ &= ~flag;
+ }
+ }
+ }
+ }
+ if (has_pending_interrupts(access)) set_interrupt_limits(access);
+ // Remove scope from chain.
+ thread_local_.interrupt_scopes_ = top->prev_;
+}
+
+bool StackGuard::CheckInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ return thread_local_.interrupt_flags_ & flag;
+}
+
+void StackGuard::RequestInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Check the chain of InterruptsScope for interception.
+ if (thread_local_.interrupt_scopes_ &&
+ thread_local_.interrupt_scopes_->Intercept(flag)) {
+ return;
+ }
+
+ // Not intercepted. Set as active interrupt flag.
+ thread_local_.interrupt_flags_ |= flag;
+ set_interrupt_limits(access);
+
+ // If this isolate is waiting in a futex, notify it to wake up.
+ isolate_->futex_wait_list_node()->NotifyWake();
+}
+
+void StackGuard::ClearInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Clear the interrupt flag from the chain of InterruptsScope.
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ current->intercepted_flags_ &= ~flag;
+ }
+
+ // Clear the interrupt flag from the active interrupt flags.
+ thread_local_.interrupt_flags_ &= ~flag;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+}
+
+int StackGuard::FetchAndClearInterrupts() {
+ ExecutionAccess access(isolate_);
+
+ int result = 0;
+ if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
+ // The TERMINATE_EXECUTION interrupt is special, since it terminates
+ // execution but should leave V8 in a resumable state. If it exists, we only
+ // fetch and clear that bit. On resume, V8 can continue processing other
+ // interrupts.
+ result = TERMINATE_EXECUTION;
+ thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ } else {
+ result = thread_local_.interrupt_flags_;
+ thread_local_.interrupt_flags_ = 0;
+ reset_limits(access);
+ }
+
+ return result;
+}
+
+char* StackGuard::ArchiveStackGuard(char* to) {
+ ExecutionAccess access(isolate_);
+ MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ ThreadLocal blank;
+
+ // Set the stack limits using the old thread_local_.
+ // TODO(isolates): This was the old semantics of constructing a ThreadLocal
+ // (as the ctor called SetStackLimits, which looked at the
+ // current thread_local_ from StackGuard)-- but is this
+ // really what was intended?
+ isolate_->heap()->SetStackLimits();
+ thread_local_ = blank;
+
+ return to + sizeof(ThreadLocal);
+}
+
+char* StackGuard::RestoreStackGuard(char* from) {
+ ExecutionAccess access(isolate_);
+ MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ isolate_->heap()->SetStackLimits();
+ return from + sizeof(ThreadLocal);
+}
+
+void StackGuard::FreeThreadResources() {
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ per_thread->set_stack_limit(thread_local_.real_climit_);
+}
+
+void StackGuard::ThreadLocal::Clear() {
+ real_jslimit_ = kIllegalLimit;
+ set_jslimit(kIllegalLimit);
+ real_climit_ = kIllegalLimit;
+ set_climit(kIllegalLimit);
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+}
+
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
+ bool should_set_stack_limits = false;
+ if (real_climit_ == kIllegalLimit) {
+ const uintptr_t kLimitSize = FLAG_stack_size * KB;
+ DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
+ uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+ set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
+ real_climit_ = limit;
+ set_climit(limit);
+ should_set_stack_limits = true;
+ }
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+ return should_set_stack_limits;
+}
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ uintptr_t stored_limit = per_thread->stack_limit();
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != 0) {
+ SetStackLimit(stored_limit);
+ }
+}
+
+// --- C a l l s t o n a t i v e s ---
+
+namespace {
+
+bool TestAndClear(int* bitfield, int mask) {
+ bool result = (*bitfield & mask);
+ *bitfield &= ~mask;
+ return result;
+}
+
+class ShouldBeZeroOnReturnScope final {
+ public:
+#ifndef DEBUG
+ explicit ShouldBeZeroOnReturnScope(int*) {}
+#else // DEBUG
+ explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
+ ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }
+
+ private:
+ int* v_;
+#endif // DEBUG
+};
+
+} // namespace
+
+Object StackGuard::HandleInterrupts() {
+ TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
+
+ if (FLAG_verify_predictable) {
+ // Advance synthetic time by making a time request.
+ isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+ }
+
+ // Fetch and clear interrupt bits in one go. See comments inside the method
+ // for special handling of TERMINATE_EXECUTION.
+ int interrupt_flags = FetchAndClearInterrupts();
+
+ // All interrupts should be fully processed when returning from this method.
+ ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);
+
+ if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
+ TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
+ return isolate_->TerminateExecution();
+ }
+
+ if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
+ isolate_->heap()->HandleGCRequest();
+ }
+
+ if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "V8.WasmGrowSharedMemory");
+ isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
+ isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCDeoptMarkedAllocationSites");
+ isolate_->heap()->DeoptMarkedAllocationSites();
+ }
+
+ if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.InstallOptimizedFunctions");
+ DCHECK(isolate_->concurrent_recompilation_enabled());
+ isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ }
+
+ if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
+ TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
+ // Callbacks must be invoked outside of ExecutionAccess lock.
+ isolate_->InvokeApiInterruptCallbacks();
+ }
+
+ if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
+ isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+ isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
+ }
+
+ isolate_->counters()->stack_interrupts()->Increment();
+ isolate_->counters()->runtime_profiler_ticks()->Increment();
+ isolate_->runtime_profiler()->MarkCandidatesForOptimization();
+
+ return ReadOnlyRoots(isolate_).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
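
HandleInterrupts above drains the flag word returned by FetchAndClearInterrupts one bit at a time with TestAndClear, and the ShouldBeZeroOnReturnScope DCHECK verifies that every requested interrupt was serviced before returning. A small self-contained illustration of that pattern, using made-up flag names rather than the real InterruptFlag values:

#include <cassert>

// Same shape as the anonymous-namespace TestAndClear above: report whether any
// of the masked bits were set, and clear them in the same step.
static bool TestAndClear(int* bitfield, int mask) {
  bool result = (*bitfield & mask) != 0;
  *bitfield &= ~mask;
  return result;
}

int main() {
  enum Flag { kGcRequest = 1 << 0, kInstallCode = 1 << 1 };  // made-up names
  int flags = kGcRequest | kInstallCode;
  if (TestAndClear(&flags, kGcRequest)) { /* handle the GC request */ }
  if (TestAndClear(&flags, kInstallCode)) { /* install optimized code */ }
  assert(flags == 0);  // mirrors ShouldBeZeroOnReturnScope in debug builds
  return flags;
}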
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
new file mode 100644
index 0000000000..d7477f1623
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.h
@@ -0,0 +1,186 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_STACK_GUARD_H_
+#define V8_EXECUTION_STACK_GUARD_H_
+
+#include "include/v8-internal.h"
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+class ExecutionAccess;
+class InterruptsScope;
+class Isolate;
+class Object;
+
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class V8_EXPORT_PRIVATE StackGuard final {
+ public:
+ explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
+
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
+ void SetStackLimit(uintptr_t limit);
+
+ // The simulator uses a separate JS stack. Limits on the JS stack might have
+ // to be adjusted in order to reflect overflows of the C stack, because we
+ // cannot rely on the interleaving of frames on the simulator.
+ void AdjustStackLimitForSimulator();
+
+ // Threading support.
+ char* ArchiveStackGuard(char* to);
+ char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+ void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ void ClearThread(const ExecutionAccess& lock);
+
+#define INTERRUPT_LIST(V) \
+ V(TERMINATE_EXECUTION, TerminateExecution, 0) \
+ V(GC_REQUEST, GC, 1) \
+ V(INSTALL_CODE, InstallCode, 2) \
+ V(API_INTERRUPT, ApiInterrupt, 3) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
+ V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
+ V(LOG_WASM_CODE, LogWasmCode, 6) \
+ V(WASM_CODE_GC, WasmCodeGC, 7)
+
+#define V(NAME, Name, id) \
+ inline bool Check##Name() { return CheckInterrupt(NAME); } \
+ inline void Request##Name() { RequestInterrupt(NAME); } \
+ inline void Clear##Name() { ClearInterrupt(NAME); }
+ INTERRUPT_LIST(V)
+#undef V
+
+ // Flag used to set the interrupt causes.
+ enum InterruptFlag {
+#define V(NAME, Name, id) NAME = (1 << id),
+ INTERRUPT_LIST(V)
+#undef V
+#define V(NAME, Name, id) NAME |
+ ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
+#undef V
+ };
+
+ uintptr_t climit() { return thread_local_.climit(); }
+ uintptr_t jslimit() { return thread_local_.jslimit(); }
+ // This provides an asynchronous read of the stack limits for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ uintptr_t real_climit() { return thread_local_.real_climit_; }
+ uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
+ Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+ Address address_of_real_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
+ }
+
+ // If the stack guard is triggered, but it is not an actual
+ // stack overflow, then handle the interruption accordingly.
+ Object HandleInterrupts();
+
+ private:
+ bool CheckInterrupt(InterruptFlag flag);
+ void RequestInterrupt(InterruptFlag flag);
+ void ClearInterrupt(InterruptFlag flag);
+ int FetchAndClearInterrupts();
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ bool has_pending_interrupts(const ExecutionAccess& lock) {
+ return thread_local_.interrupt_flags_ != 0;
+ }
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void set_interrupt_limits(const ExecutionAccess& lock);
+
+  // Reset limits to actual values, for example after handling an interrupt.
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void reset_limits(const ExecutionAccess& lock);
+
+ // Enable or disable interrupts.
+ void EnableInterrupts();
+ void DisableInterrupts();
+
+#if V8_TARGET_ARCH_64_BIT
+ static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
+ static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
+#else
+ static const uintptr_t kInterruptLimit = 0xfffffffe;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
+#endif
+
+ void PushInterruptsScope(InterruptsScope* scope);
+ void PopInterruptsScope();
+
+ class ThreadLocal final {
+ public:
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Clear();
+
+ // Returns true if the heap's stack limits should be set, false if not.
+ bool Initialize(Isolate* isolate);
+
+ // The stack limit is split into a JavaScript and a C++ stack limit. These
+ // two are the same except when running on a simulator where the C++ and
+    // JavaScript stacks are separate. Each of the two stack limits has two
+    // values. The one with the real_ prefix is the actual stack limit
+ // set for the VM. The one without the real_ prefix has the same value as
+ // the actual stack limit except when there is an interruption (e.g. debug
+ // break or preemption) in which case it is lowered to make stack checks
+ // fail. Both the generated code and the runtime system check against the
+ // one without the real_ prefix.
+ uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
+
+ // jslimit_ and climit_ can be read without any lock.
+ // Writing requires the ExecutionAccess lock.
+ base::AtomicWord jslimit_;
+ base::AtomicWord climit_;
+
+ uintptr_t jslimit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
+ }
+ void set_jslimit(uintptr_t limit) {
+ return base::Relaxed_Store(&jslimit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+ uintptr_t climit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
+ }
+ void set_climit(uintptr_t limit) {
+ return base::Relaxed_Store(&climit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+
+ InterruptsScope* interrupt_scopes_;
+ int interrupt_flags_;
+ };
+
+ // TODO(isolates): Technically this could be calculated directly from a
+ // pointer to StackGuard.
+ Isolate* isolate_;
+ ThreadLocal thread_local_;
+
+ friend class Isolate;
+ friend class StackLimitCheck;
+ friend class InterruptsScope;
+
+ DISALLOW_COPY_AND_ASSIGN(StackGuard);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_STACK_GUARD_H_
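
The INTERRUPT_LIST X-macro in the new header generates the Check*/Request*/Clear* accessors and the InterruptFlag bit enum from one list. A reduced, compilable sketch of the enum expansion, keeping only two of the real entries:

#include <cstdio>

// Two entries from the real list are enough to show the expansion.
#define INTERRUPT_LIST(V)                       \
  V(TERMINATE_EXECUTION, TerminateExecution, 0) \
  V(GC_REQUEST, GC, 1)

enum InterruptFlag {
#define V(NAME, Name, id) NAME = (1 << id),
  INTERRUPT_LIST(V)
#undef V
#define V(NAME, Name, id) NAME |
  ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
#undef V
};

int main() {
  // Expands to TERMINATE_EXECUTION = 1, GC_REQUEST = 2, ALL_INTERRUPTS = 3.
  std::printf("%d %d %d\n", TERMINATE_EXECUTION, GC_REQUEST, ALL_INTERRUPTS);
  return 0;
}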
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index 2a55fea9c9..716a6d7082 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -8,6 +8,7 @@
#include "src/codegen/x64/assembler-x64-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {