summaryrefslogtreecommitdiff
path: root/deps/v8/src/base
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/base')
-rw-r--r--deps/v8/src/base/address-region.h7
-rw-r--r--deps/v8/src/base/flags.h6
-rw-r--r--deps/v8/src/base/page-allocator.cc4
-rw-r--r--deps/v8/src/base/page-allocator.h2
-rw-r--r--deps/v8/src/base/platform/mutex.h2
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc10
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc25
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc20
-rw-r--r--deps/v8/src/base/platform/platform.h7
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc7
10 files changed, 53 insertions, 37 deletions
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
index 1fdc479f6f..0f4809f9e8 100644
--- a/deps/v8/src/base/address-region.h
+++ b/deps/v8/src/base/address-region.h
@@ -45,6 +45,13 @@ class AddressRegion {
return contains(region.address_, region.size_);
}
+ base::AddressRegion GetOverlap(AddressRegion region) const {
+ Address overlap_start = std::max(begin(), region.begin());
+ Address overlap_end =
+ std::max(overlap_start, std::min(end(), region.end()));
+ return {overlap_start, overlap_end - overlap_start};
+ }
+
bool operator==(AddressRegion other) const {
return address_ == other.address_ && size_ == other.size_;
}
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 055f0ff498..c2b7952260 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -53,13 +53,13 @@ class Flags final {
}
constexpr Flags operator&(const Flags& flags) const {
- return Flags(*this) &= flags;
+ return Flags(mask_ & flags.mask_);
}
constexpr Flags operator|(const Flags& flags) const {
- return Flags(*this) |= flags;
+ return Flags(mask_ | flags.mask_);
}
constexpr Flags operator^(const Flags& flags) const {
- return Flags(*this) ^= flags;
+ return Flags(mask_ ^ flags.mask_);
}
Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index b339f528d2..76a0aff399 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -36,9 +36,9 @@ void* PageAllocator::GetRandomMmapAddr() {
return base::OS::GetRandomMmapAddr();
}
-void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment,
+void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) {
- return base::OS::Allocate(address, size, alignment,
+ return base::OS::Allocate(hint, size, alignment,
static_cast<base::OS::MemoryPermission>(access));
}
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index ced1156cca..2b8ee1a5e5 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -26,7 +26,7 @@ class V8_BASE_EXPORT PageAllocator
void* GetRandomMmapAddr() override;
- void* AllocatePages(void* address, size_t size, size_t alignment,
+ void* AllocatePages(void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) override;
bool FreePages(void* address, size_t size) override;
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 2b8b55eeb5..c48cf8d339 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -67,6 +67,8 @@ class V8_BASE_EXPORT Mutex final {
return native_handle_;
}
+ V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); }
+
private:
NativeHandle native_handle_;
#ifdef DEBUG
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 17f9aa3f17..92a5fbe490 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -95,13 +95,13 @@ double LocalTimeOffset(double time_ms, bool is_utc) {
}
// static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_LE(page_size, alignment);
- address = AlignedAddress(address, alignment);
+ hint = AlignedAddress(hint, alignment);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
@@ -109,7 +109,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
DWORD protect = GetProtectionFromMemoryPermission(access);
// First, try an exact size aligned allocation.
- uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
@@ -120,7 +120,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
CHECK(Free(base, size));
// Clear the hint. It's unlikely we can allocate at this address.
- address = nullptr;
+ hint = nullptr;
// Add the maximum misalignment so we are guaranteed an aligned base address
// in the allocated region.
@@ -128,7 +128,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
const int kMaxAttempts = 3;
aligned_base = nullptr;
for (int i = 0; i < kMaxAttempts; ++i) {
- base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// Try to trim the allocation by freeing the padded allocation and then
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 6da83d7e02..c50cdd7a98 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -137,10 +137,10 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
return flags;
}
-void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
+void* Allocate(void* hint, size_t size, OS::MemoryPermission access) {
int prot = GetProtectionFromMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access);
- void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
+ void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
}
@@ -278,16 +278,16 @@ void* OS::GetRandomMmapAddr() {
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
- address = AlignedAddress(address, alignment);
+ hint = AlignedAddress(hint, alignment);
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
request_size = RoundUp(request_size, OS::AllocatePageSize());
- void* result = base::Allocate(address, request_size, access);
+ void* result = base::Allocate(hint, request_size, access);
if (result == nullptr) return nullptr;
// Unmap memory allocated before the aligned base address.
@@ -761,13 +761,12 @@ void Thread::set_name(const char* name) {
name_[sizeof(name_) - 1] = '\0';
}
-
-void Thread::Start() {
+bool Thread::Start() {
int result;
pthread_attr_t attr;
memset(&attr, 0, sizeof(attr));
result = pthread_attr_init(&attr);
- DCHECK_EQ(0, result);
+ if (result != 0) return false;
size_t stack_size = stack_size_;
if (stack_size == 0) {
#if V8_OS_MACOSX
@@ -780,17 +779,17 @@ void Thread::Start() {
}
if (stack_size > 0) {
result = pthread_attr_setstacksize(&attr, stack_size);
- DCHECK_EQ(0, result);
+ if (result != 0) return pthread_attr_destroy(&attr), false;
}
{
MutexGuard lock_guard(&data_->thread_creation_mutex_);
result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ if (result != 0 || data_->thread_ == kNoThread) {
+ return pthread_attr_destroy(&attr), false;
+ }
}
- DCHECK_EQ(0, result);
result = pthread_attr_destroy(&attr);
- DCHECK_EQ(0, result);
- DCHECK_NE(data_->thread_, kNoThread);
- USE(result);
+ return result == 0;
}
void Thread::Join() { pthread_join(data_->thread_, nullptr); }
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index d01b1c07fe..04ef8a30f2 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -798,13 +798,13 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
} // namespace
// static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
MemoryPermission access) {
size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
DCHECK_LE(page_size, alignment);
- address = AlignedAddress(address, alignment);
+ hint = AlignedAddress(hint, alignment);
DWORD flags = (access == OS::MemoryPermission::kNoAccess)
? MEM_RESERVE
@@ -812,7 +812,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
DWORD protect = GetProtectionFromMemoryPermission(access);
// First, try an exact size aligned allocation.
- uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
@@ -824,7 +824,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
CHECK(Free(base, size));
// Clear the hint. It's unlikely we can allocate at this address.
- address = nullptr;
+ hint = nullptr;
// Add the maximum misalignment so we are guaranteed an aligned base address
// in the allocated region.
@@ -832,7 +832,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
const int kMaxAttempts = 3;
aligned_base = nullptr;
for (int i = 0; i < kMaxAttempts; ++i) {
- base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// Try to trim the allocation by freeing the padded allocation and then
@@ -1352,13 +1352,13 @@ Thread::~Thread() {
// Create a new thread. It is important to use _beginthreadex() instead of
// the Win32 function CreateThread(), because the CreateThread() does not
// initialize thread specific structures in the C runtime library.
-void Thread::Start() {
- data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(nullptr, static_cast<unsigned>(stack_size_), ThreadEntry,
- this, 0, &data_->thread_id_));
+bool Thread::Start() {
+ uintptr_t result = _beginthreadex(nullptr, static_cast<unsigned>(stack_size_),
+ ThreadEntry, this, 0, &data_->thread_id_);
+ data_->thread_ = reinterpret_cast<HANDLE>(result);
+ return result != 0;
}
-
// Wait for thread to terminate.
void Thread::Join() {
if (data_->thread_id_ != GetCurrentThreadId()) {
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index e073704b2c..e1f84043eb 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -333,15 +333,16 @@ class V8_BASE_EXPORT Thread {
virtual ~Thread();
// Start new thread by calling the Run() method on the new thread.
- void Start();
+ V8_WARN_UNUSED_RESULT bool Start();
// Start new thread and wait until Run() method is called on the new thread.
- void StartSynchronously() {
+ bool StartSynchronously() {
start_semaphore_ = new Semaphore(0);
- Start();
+ if (!Start()) return false;
start_semaphore_->Wait();
delete start_semaphore_;
start_semaphore_ = nullptr;
+ return true;
}
// Wait until thread terminates.
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 3b38858192..17c2cced8a 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -51,6 +51,13 @@ RandomNumberGenerator::RandomNumberGenerator() {
result = rand_s(&second_half);
DCHECK_EQ(0, result);
SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#elif V8_OS_MACOSX
+ // Despite its "rc4" prefix, arc4random no longer uses the RC4 algorithm.
+ // It always succeeds, has decent performance, and involves no file
+ // descriptor.
+ int64_t seed;
+ arc4random_buf(&seed, sizeof(seed));
+ SetSeed(seed);
#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");