Diffstat (limited to 'deps/v8/src/base')
-rw-r--r--  deps/v8/src/base/address-region.h                  |  70
-rw-r--r--  deps/v8/src/base/atomic-utils.h                    |  16
-rw-r--r--  deps/v8/src/base/bits.h                            |   8
-rw-r--r--  deps/v8/src/base/bounded-page-allocator.cc         | 101
-rw-r--r--  deps/v8/src/base/bounded-page-allocator.h          |  79
-rw-r--r--  deps/v8/src/base/build_config.h                    |   4
-rw-r--r--  deps/v8/src/base/debug/stack_trace.cc              |   2
-rw-r--r--  deps/v8/src/base/debug/stack_trace_posix.cc        |   8
-rw-r--r--  deps/v8/src/base/debug/stack_trace_win.cc          |   7
-rw-r--r--  deps/v8/src/base/ieee754.cc                        |  20
-rw-r--r--  deps/v8/src/base/logging.h                         |  10
-rw-r--r--  deps/v8/src/base/lsan-page-allocator.cc            |  59
-rw-r--r--  deps/v8/src/base/lsan-page-allocator.h             |  56
-rw-r--r--  deps/v8/src/base/macros.h                          |  48
-rw-r--r--  deps/v8/src/base/optional.h                        |   2
-rw-r--r--  deps/v8/src/base/page-allocator.cc                 |   8
-rw-r--r--  deps/v8/src/base/page-allocator.h                  |  11
-rw-r--r--  deps/v8/src/base/platform/OWNERS                   |   2
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc      |  12
-rw-r--r--  deps/v8/src/base/platform/platform-linux.cc        |   8
-rw-r--r--  deps/v8/src/base/platform/platform-posix-time.h    |   2
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc        |  13
-rw-r--r--  deps/v8/src/base/platform/platform-posix.h         |   2
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc        |   6
-rw-r--r--  deps/v8/src/base/platform/platform.h               |   2
-rw-r--r--  deps/v8/src/base/platform/semaphore.cc             |   4
-rw-r--r--  deps/v8/src/base/platform/time.h                   |   5
-rw-r--r--  deps/v8/src/base/region-allocator.cc               | 291
-rw-r--r--  deps/v8/src/base/region-allocator.h                | 164
-rw-r--r--  deps/v8/src/base/safe_math.h                       |   2
-rw-r--r--  deps/v8/src/base/threaded-list.h                   | 267
-rw-r--r--  deps/v8/src/base/timezone-cache.h                  |   2
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.cc  |   2
-rw-r--r--  deps/v8/src/base/utils/random-number-generator.h   |   9
34 files changed, 1202 insertions(+), 100 deletions(-)
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
new file mode 100644
index 0000000000..6b733cfe4d
--- /dev/null
+++ b/deps/v8/src/base/address-region.h
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ADDRESS_REGION_H_
+#define V8_BASE_ADDRESS_REGION_H_
+
+#include <iostream>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// Helper class representing an address region of a certain size.
+class AddressRegion {
+ public:
+ typedef uintptr_t Address;
+
+ AddressRegion() = default;
+
+ AddressRegion(Address address, size_t size)
+ : address_(address), size_(size) {}
+
+ Address begin() const { return address_; }
+ Address end() const { return address_ + size_; }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ bool is_empty() const { return size_ == 0; }
+
+ bool contains(Address address) const {
+ STATIC_ASSERT(std::is_unsigned<Address>::value);
+ return (address - begin()) < size();
+ }
+
+ bool contains(Address address, size_t size) const {
+ STATIC_ASSERT(std::is_unsigned<Address>::value);
+ Address offset = address - begin();
+ return (offset < size_) && (offset + size <= size_);
+ }
+
+ bool contains(AddressRegion region) const {
+ return contains(region.address_, region.size_);
+ }
+
+ bool operator==(AddressRegion other) const {
+ return address_ == other.address_ && size_ == other.size_;
+ }
+
+ bool operator!=(AddressRegion other) const {
+ return address_ != other.address_ || size_ != other.size_;
+ }
+
+ private:
+ Address address_ = 0;
+ size_t size_ = 0;
+};
+ASSERT_TRIVIALLY_COPYABLE(AddressRegion);
+
+inline std::ostream& operator<<(std::ostream& out, AddressRegion region) {
+ return out << "[" << reinterpret_cast<void*>(region.begin()) << "+"
+ << region.size() << "]";
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ADDRESS_REGION_H_
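
The single-comparison contains(Address) check above relies on Address being unsigned: when the queried address lies below begin(), the subtraction wraps around to a very large value and fails the size comparison. A minimal standalone sketch of that behavior (illustrative only, with hypothetical constants):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const uintptr_t begin = 0x1000;
      const size_t size = 0x2000;  // region is [0x1000, 0x3000)
      auto contains = [&](uintptr_t address) {
        // If address < begin, the unsigned subtraction wraps to a value far
        // larger than size, so the single comparison also rejects addresses
        // below the region.
        return (address - begin) < size;
      };
      assert(contains(0x1000));   // first byte
      assert(contains(0x2FFF));   // last byte
      assert(!contains(0x3000));  // one past the end
      assert(!contains(0x0FFF));  // below the region: wraps, rejected
      return 0;
    }
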
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index d81c537e57..90681b8a35 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -377,6 +377,22 @@ class AtomicElement {
T value_;
};
+template <typename T,
+ typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline void CheckedIncrement(std::atomic<T>* number, T amount) {
+ const T old = number->fetch_add(amount);
+ DCHECK_GE(old + amount, old);
+ USE(old);
+}
+
+template <typename T,
+ typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline void CheckedDecrement(std::atomic<T>* number, T amount) {
+ const T old = number->fetch_sub(amount);
+ DCHECK_GE(old, amount);
+ USE(old);
+}
+
} // namespace base
} // namespace v8
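
The new helpers wrap fetch_add/fetch_sub and verify, via DCHECK, that the unsigned counter neither overflows on increment nor underflows on decrement. A short usage sketch, assuming V8's include paths and the declarations above (the counter and the OnCommit/OnUncommit names are hypothetical):

    #include <atomic>
    #include <cstddef>

    #include "src/base/atomic-utils.h"

    // Hypothetical accounting counter for committed memory.
    std::atomic<size_t> committed_bytes{0};

    void OnCommit(size_t bytes) {
      v8::base::CheckedIncrement(&committed_bytes, bytes);
    }

    void OnUncommit(size_t bytes) {
      // In debug builds this DCHECKs that at least |bytes| were previously
      // committed, catching accounting bugs early.
      v8::base::CheckedDecrement(&committed_bytes, bytes);
    }
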
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 731a7181d7..147a1730b2 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -146,6 +146,14 @@ constexpr inline bool IsPowerOfTwo(T value) {
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// Same for 64 bit integers. |value| must be <= 2^63
V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
+// Same for size_t integers.
+inline size_t RoundUpToPowerOfTwo(size_t value) {
+ if (sizeof(size_t) == sizeof(uint64_t)) {
+ return RoundUpToPowerOfTwo64(value);
+ } else {
+ return RoundUpToPowerOfTwo32(value);
+ }
+}
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
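
The added overload simply dispatches to RoundUpToPowerOfTwo32 or RoundUpToPowerOfTwo64 depending on sizeof(size_t). A brief usage sketch under the same assumptions (NextPowerOfTwoCapacity is a hypothetical helper):

    #include <cstddef>

    #include "src/base/bits.h"

    // Rounds a requested capacity up to the next power of two using the new
    // size_t overload.
    size_t NextPowerOfTwoCapacity(size_t requested) {
      return v8::base::bits::RoundUpToPowerOfTwo(requested);  // e.g. 37 -> 64
    }
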
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
new file mode 100644
index 0000000000..ca9dde25f7
--- /dev/null
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -0,0 +1,101 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bounded-page-allocator.h"
+
+namespace v8 {
+namespace base {
+
+BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
+ Address start, size_t size,
+ size_t allocate_page_size)
+ : allocate_page_size_(allocate_page_size),
+ commit_page_size_(page_allocator->CommitPageSize()),
+ page_allocator_(page_allocator),
+ region_allocator_(start, size, allocate_page_size_) {
+ CHECK_NOT_NULL(page_allocator);
+ CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
+ CHECK(IsAligned(allocate_page_size_, commit_page_size_));
+}
+
+BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
+ return region_allocator_.begin();
+}
+
+size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }
+
+void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access) {
+ LockGuard<Mutex> guard(&mutex_);
+ CHECK(IsAligned(alignment, region_allocator_.page_size()));
+
+ // Region allocator does not support alignments bigger than its own
+ // allocation alignment.
+ CHECK_LE(alignment, allocate_page_size_);
+
+ // TODO(ishell): Consider using randomized version here.
+ Address address = region_allocator_.AllocateRegion(size);
+ if (address == RegionAllocator::kAllocationFailure) {
+ return nullptr;
+ }
+ CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
+ access));
+ return reinterpret_cast<void*>(address);
+}
+
+bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
+ LockGuard<Mutex> guard(&mutex_);
+
+ Address address = reinterpret_cast<Address>(raw_address);
+ size_t freed_size = region_allocator_.FreeRegion(address);
+ if (freed_size != size) return false;
+ CHECK(page_allocator_->SetPermissions(raw_address, size,
+ PageAllocator::kNoAccess));
+ return true;
+}
+
+bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
+ size_t new_size) {
+ Address address = reinterpret_cast<Address>(raw_address);
+ CHECK(IsAligned(address, allocate_page_size_));
+
+ DCHECK_LT(new_size, size);
+ DCHECK(IsAligned(size - new_size, commit_page_size_));
+
+ // Check if we freed any allocatable pages by this release.
+ size_t allocated_size = RoundUp(size, allocate_page_size_);
+ size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);
+
+#ifdef DEBUG
+ {
+ // There must be an allocated region at the given |address| of a size not
+ // smaller than |size|.
+ LockGuard<Mutex> guard(&mutex_);
+ CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
+ }
+#endif
+
+ if (new_allocated_size < allocated_size) {
+ LockGuard<Mutex> guard(&mutex_);
+ region_allocator_.TrimRegion(address, new_allocated_size);
+ }
+
+ // Keep the region in "used" state, just uncommit some pages.
+ Address free_address = address + new_size;
+ size_t free_size = size - new_size;
+ return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
+ free_size, PageAllocator::kNoAccess);
+}
+
+bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) {
+ DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
+ DCHECK(IsAligned(size, commit_page_size_));
+ DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
+ return page_allocator_->SetPermissions(address, size, access);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
new file mode 100644
index 0000000000..e3d928618b
--- /dev/null
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -0,0 +1,79 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/region-allocator.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that allocates pages within the
+// pre-reserved region of virtual space. This class requires the virtual space
+// to be kept reserved during the lifetime of this object.
+// The main applications of the bounded page allocator are
+// - V8 heap pointer compression which requires the whole V8 heap to be
+// allocated within a contiguous range of virtual address space,
+// - executable page allocation, which allows using PC-relative 32-bit code
+// displacement on certain 64-bit platforms.
+// The bounded page allocator uses another page allocator instance for the
+// actual page allocations.
+// The implementation is thread-safe.
+class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
+ public:
+ typedef uintptr_t Address;
+
+ BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
+ size_t size, size_t allocate_page_size);
+ ~BoundedPageAllocator() override = default;
+
+ // These functions are not inlined to avoid https://crbug.com/v8/8275.
+ Address begin() const;
+ size_t size() const;
+
+ // Returns true if the given address is in the range controlled by the bounded
+ // page allocator instance.
+ bool contains(Address address) const {
+ return region_allocator_.contains(address);
+ }
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override;
+
+ private:
+ v8::base::Mutex mutex_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+ v8::PageAllocator* const page_allocator_;
+ v8::base::RegionAllocator region_allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
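
As the header comment explains, the bounded allocator hands out pages from a pre-reserved range and only delegates permission changes to the wrapped allocator. A hedged usage sketch based on the API declared above (ExerciseReservation and its parameters are hypothetical):

    #include "src/base/bounded-page-allocator.h"

    // Illustrative only: |platform_allocator| is an existing v8::PageAllocator,
    // and |reservation_start|/|reservation_size| describe an already reserved,
    // suitably aligned range of address space that stays reserved for the
    // lifetime of the bounded allocator.
    bool ExerciseReservation(v8::PageAllocator* platform_allocator,
                             uintptr_t reservation_start,
                             size_t reservation_size) {
      v8::base::BoundedPageAllocator bounded(
          platform_allocator, reservation_start, reservation_size,
          platform_allocator->AllocatePageSize());
      // Pages are carved out of the reserved range; the wrapped allocator is
      // only asked to change page permissions.
      void* page = bounded.AllocatePages(nullptr, bounded.AllocatePageSize(),
                                         bounded.AllocatePageSize(),
                                         v8::PageAllocator::kReadWrite);
      if (page == nullptr) return false;
      return bounded.FreePages(page, bounded.AllocatePageSize());
    }
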
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index df0d1110a5..695e67a618 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -196,9 +196,9 @@
#endif
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true
#else
-#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK false
#endif
// Number of bits to represent the page size for paged spaces. The value of 19
diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc
index 2a3fb87a19..cbf00ad17c 100644
--- a/deps/v8/src/base/debug/stack_trace.cc
+++ b/deps/v8/src/base/debug/stack_trace.cc
@@ -21,7 +21,7 @@ StackTrace::StackTrace(const void* const* trace, size_t count) {
count_ = count;
}
-StackTrace::~StackTrace() {}
+StackTrace::~StackTrace() = default;
const void* const* StackTrace::Addresses(size_t* count) const {
*count = count_;
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 51b821bdd1..ed602af547 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -61,7 +61,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
namespace {
volatile sig_atomic_t in_signal_handler = 0;
-bool dump_stack_in_signal_handler = 1;
+bool dump_stack_in_signal_handler = true;
// The prefix used for mangled symbols, per the Itanium C++ ABI:
// http://www.codesourcery.com/cxx-abi/abi.html#mangling
@@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled_symbol(
- abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
+ abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, nullptr, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@@ -125,7 +125,7 @@ class BacktraceOutputHandler {
virtual void HandleOutput(const char* output) = 0;
protected:
- virtual ~BacktraceOutputHandler() {}
+ virtual ~BacktraceOutputHandler() = default;
};
#if HAVE_EXECINFO_H
@@ -266,7 +266,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
- PrintBacktraceOutputHandler() {}
+ PrintBacktraceOutputHandler() = default;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by in-process
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 3fe66d97ad..6b22131233 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -7,13 +7,6 @@
#include "src/base/debug/stack_trace.h"
-// This file can't use "src/base/win32-headers.h" because it defines symbols
-// that lead to compilation errors. But `NOMINMAX` should be defined to disable
-// defining of the `min` and `max` MACROS.
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-
#include <windows.h>
#include <dbghelp.h>
#include <Shlwapi.h>
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index 7a1cc175cb..8c5641569d 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -90,7 +90,7 @@ typedef union {
ew_u.value = (d); \
(ix0) = ew_u.parts.msw; \
(ix1) = ew_u.parts.lsw; \
- } while (0)
+ } while (false)
/* Get a 64-bit int from a double. */
#define EXTRACT_WORD64(ix, d) \
@@ -98,7 +98,7 @@ typedef union {
ieee_double_shape_type ew_u; \
ew_u.value = (d); \
(ix) = ew_u.xparts.w; \
- } while (0)
+ } while (false)
/* Get the more significant 32 bit int from a double. */
@@ -107,7 +107,7 @@ typedef union {
ieee_double_shape_type gh_u; \
gh_u.value = (d); \
(i) = gh_u.parts.msw; \
- } while (0)
+ } while (false)
/* Get the less significant 32 bit int from a double. */
@@ -116,7 +116,7 @@ typedef union {
ieee_double_shape_type gl_u; \
gl_u.value = (d); \
(i) = gl_u.parts.lsw; \
- } while (0)
+ } while (false)
/* Set a double from two 32 bit ints. */
@@ -126,7 +126,7 @@ typedef union {
iw_u.parts.msw = (ix0); \
iw_u.parts.lsw = (ix1); \
(d) = iw_u.value; \
- } while (0)
+ } while (false)
/* Set a double from a 64-bit int. */
#define INSERT_WORD64(d, ix) \
@@ -134,7 +134,7 @@ typedef union {
ieee_double_shape_type iw_u; \
iw_u.xparts.w = (ix); \
(d) = iw_u.value; \
- } while (0)
+ } while (false)
/* Set the more significant 32 bits of a double from an int. */
@@ -144,7 +144,7 @@ typedef union {
sh_u.value = (d); \
sh_u.parts.msw = (v); \
(d) = sh_u.value; \
- } while (0)
+ } while (false)
/* Set the less significant 32 bits of a double from an int. */
@@ -154,7 +154,7 @@ typedef union {
sl_u.value = (d); \
sl_u.parts.lsw = (v); \
(d) = sl_u.value; \
- } while (0)
+ } while (false)
/* Support macro. */
@@ -1210,9 +1210,9 @@ double atan(double x) {
if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
- return atanhi[3] + *(volatile double *)&atanlo[3];
+ return atanhi[3] + *const_cast<volatile double*>(&atanlo[3]);
else
- return -atanhi[3] - *(volatile double *)&atanlo[3];
+ return -atanhi[3] - *const_cast<volatile double*>(&atanlo[3]);
}
if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
if (ix < 0x3E400000) { /* |x| < 2^-27 */
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index baf6b12ccb..9a9538d065 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -49,7 +49,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
FATAL("Check failed: %s.", message); \
} \
- } while (0)
+ } while (false)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
#ifdef DEBUG
@@ -59,7 +59,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
V8_Dcheck(__FILE__, __LINE__, message); \
} \
- } while (0)
+ } while (false)
#define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition)
// Helper macro for binary operators.
@@ -73,7 +73,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
FATAL("Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
- } while (0)
+ } while (false)
#define DCHECK_OP(name, op, lhs, rhs) \
do { \
@@ -84,7 +84,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
V8_Dcheck(__FILE__, __LINE__, _msg->c_str()); \
delete _msg; \
} \
- } while (0)
+ } while (false)
#else
@@ -98,7 +98,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \
(rhs)); \
CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
- } while (0)
+ } while (false)
#define DCHECK_WITH_MSG(condition, msg) void(0);
diff --git a/deps/v8/src/base/lsan-page-allocator.cc b/deps/v8/src/base/lsan-page-allocator.cc
new file mode 100644
index 0000000000..4840c7ea80
--- /dev/null
+++ b/deps/v8/src/base/lsan-page-allocator.cc
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/lsan-page-allocator.h"
+
+#include "src/base/logging.h"
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator)
+ : page_allocator_(page_allocator),
+ allocate_page_size_(page_allocator_->AllocatePageSize()),
+ commit_page_size_(page_allocator_->CommitPageSize()) {
+ DCHECK_NOT_NULL(page_allocator);
+}
+
+void* LsanPageAllocator::AllocatePages(void* address, size_t size,
+ size_t alignment,
+ PageAllocator::Permission access) {
+ void* result =
+ page_allocator_->AllocatePages(address, size, alignment, access);
+#if defined(LEAK_SANITIZER)
+ if (result != nullptr) {
+ __lsan_register_root_region(result, size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::FreePages(void* address, size_t size) {
+ bool result = page_allocator_->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ }
+#endif
+ return result;
+}
+
+bool LsanPageAllocator::ReleasePages(void* address, size_t size,
+ size_t new_size) {
+ bool result = page_allocator_->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(address, size);
+ __lsan_register_root_region(address, new_size);
+ }
+#endif
+ return result;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/lsan-page-allocator.h b/deps/v8/src/base/lsan-page-allocator.h
new file mode 100644
index 0000000000..d95c7fbf1e
--- /dev/null
+++ b/deps/v8/src/base/lsan-page-allocator.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+#define V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that decorates the provided page
+// allocator object with leak sanitizer notifications when LEAK_SANITIZER
+// is defined.
+class V8_BASE_EXPORT LsanPageAllocator
+ : public NON_EXPORTED_BASE(::v8::PageAllocator) {
+ public:
+ LsanPageAllocator(v8::PageAllocator* page_allocator);
+ ~LsanPageAllocator() override = default;
+
+ size_t AllocatePageSize() override { return allocate_page_size_; }
+
+ size_t CommitPageSize() override { return commit_page_size_; }
+
+ void SetRandomMmapSeed(int64_t seed) override {
+ return page_allocator_->SetRandomMmapSeed(seed);
+ }
+
+ void* GetRandomMmapAddr() override {
+ return page_allocator_->GetRandomMmapAddr();
+ }
+
+ void* AllocatePages(void* address, size_t size, size_t alignment,
+ PageAllocator::Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size,
+ PageAllocator::Permission access) override {
+ return page_allocator_->SetPermissions(address, size, access);
+ }
+
+ private:
+ v8::PageAllocator* const page_allocator_;
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_LSAN_PAGE_ALLOCATOR_H_
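
A short sketch of the decorator in use, based only on the declarations above (the Example function is hypothetical):

    #include "src/base/lsan-page-allocator.h"

    // When LEAK_SANITIZER is defined, every allocated page range is registered
    // as an LSan root region and unregistered again when freed; otherwise the
    // calls simply forward to the wrapped allocator.
    void Example(v8::PageAllocator* platform_allocator) {
      v8::base::LsanPageAllocator lsan_allocator(platform_allocator);
      const size_t page = lsan_allocator.AllocatePageSize();
      void* p = lsan_allocator.AllocatePages(nullptr, page, page,
                                             v8::PageAllocator::kReadWrite);
      if (p != nullptr) lsan_allocator.FreePages(p, page);
    }
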
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 081018cc2e..8a2efe61a9 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -14,6 +14,9 @@
// No-op macro which is used to work around MSVC's funky VA_ARGS support.
#define EXPAND(x) x
+// This macro does nothing. That's all.
+#define NOTHING(...)
+
// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
// have to make sure that only standard-layout types and simple field
// designators are used.
@@ -195,8 +198,9 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
#endif
-
-// TODO(all) Replace all uses of this macro with static_assert, remove macro.
+// A convenience wrapper around static_assert without a string message argument.
+// Once C++17 becomes the default, this macro can be removed in favor of the
+// new static_assert(condition) overload.
#define STATIC_ASSERT(test) static_assert(test, #test)
namespace v8 {
@@ -276,6 +280,12 @@ struct Use {
(void)unused_tmp_array_for_use_macro; \
} while (false)
+// Evaluate the instantiations of an expression with parameter packs.
+// Since USE has left-to-right evaluation order of its arguments,
+// the parameter pack is iterated from left to right and side effects
+// have defined behavior.
+#define ITERATE_PACK(...) USE(0, ((__VA_ARGS__), 0)...)
+
} // namespace base
} // namespace v8
@@ -346,47 +356,37 @@ V8_INLINE A implicit_cast(A x) {
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
-
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-constexpr inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-constexpr inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
+ STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
DCHECK(m != 0 && ((m & (m - 1)) == 0));
- return AddressFrom<T>(OffsetFrom(x) & -m);
+ return x & -m;
}
template <intptr_t m, typename T>
constexpr inline T RoundDown(T x) {
+ STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
STATIC_ASSERT(m != 0 && ((m & (m - 1)) == 0));
- return AddressFrom<T>(OffsetFrom(x) & -m);
+ return x & -m;
}
// Return the smallest multiple of m which is >= x.
template <typename T>
inline T RoundUp(T x, intptr_t m) {
+ STATIC_ASSERT(std::is_integral<T>::value);
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
template <intptr_t m, typename T>
constexpr inline T RoundUp(T x) {
- return RoundDown<m, T>(static_cast<T>(x + m - 1));
+ STATIC_ASSERT(std::is_integral<T>::value);
+ return RoundDown<m, T>(static_cast<T>(x + (m - 1)));
+}
+
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
+ return (value & (alignment - 1)) == 0;
}
inline void* AlignedAddress(void* address, size_t alignment) {
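
The rounding helpers above depend on m being a power of two: x & -m clears the low log2(m) bits, RoundUp adds m - 1 first, and IsAligned tests that the low bits are zero. A self-contained illustration of those bit tricks (it mirrors the helpers rather than including the V8 headers):

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr intptr_t m = 16;            // must be a power of two
      assert((37 & -m) == 32);              // RoundDown(37, 16)
      assert(((37 + m - 1) & -m) == 48);    // RoundUp(37, 16)
      assert(((48 + m - 1) & -m) == 48);    // already a multiple: unchanged
      assert((48 & (m - 1)) == 0);          // IsAligned(48, 16)
      assert((37 & (m - 1)) != 0);          // IsAligned(37, 16) is false
      return 0;
    }
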
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 6f5276843d..7dfef2d31f 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -123,7 +123,7 @@ class Optional {
public:
using value_type = T;
- constexpr Optional() {}
+ constexpr Optional() = default;
constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 25ee2e4721..c25104739d 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -24,11 +24,9 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
#undef STATIC_ASSERT_ENUM
-size_t PageAllocator::AllocatePageSize() {
- return base::OS::AllocatePageSize();
-}
-
-size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
+PageAllocator::PageAllocator()
+ : allocate_page_size_(base::OS::AllocatePageSize()),
+ commit_page_size_(base::OS::CommitPageSize()) {}
void PageAllocator::SetRandomMmapSeed(int64_t seed) {
base::OS::SetRandomMmapSeed(seed);
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index ff817cdba2..68e17db494 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -15,11 +15,12 @@ namespace base {
class V8_BASE_EXPORT PageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
- virtual ~PageAllocator() = default;
+ PageAllocator();
+ ~PageAllocator() override = default;
- size_t AllocatePageSize() override;
+ size_t AllocatePageSize() override { return allocate_page_size_; }
- size_t CommitPageSize() override;
+ size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override;
@@ -34,6 +35,10 @@ class V8_BASE_EXPORT PageAllocator
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
+
+ private:
+ const size_t allocate_page_size_;
+ const size_t commit_page_size_;
};
} // namespace base
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index 5deaa67ce7..cbaed6105d 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -3,4 +3,6 @@ set noparent
hpayer@chromium.org
mlippautz@chromium.org
+per-file platform-fuchsia.cc=wez@chromium.org
+
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index d1979fb9d8..713ee404bd 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -57,8 +57,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
strlen(kVirtualMemoryName));
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
- zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0,
- request_size, prot, &reservation);
+ zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
+ request_size, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
@@ -67,7 +67,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@@ -114,9 +115,8 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
- return zx_vmar_protect_old(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- prot) == ZX_OK;
+ return zx_vmar_protect(zx_vmar_root_self(), prot,
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 725ad0c6eb..10815f29c5 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -27,14 +27,6 @@
#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
#include <cmath>
#undef MAP_TYPE
diff --git a/deps/v8/src/base/platform/platform-posix-time.h b/deps/v8/src/base/platform/platform-posix-time.h
index 4d3373715b..7814296b83 100644
--- a/deps/v8/src/base/platform/platform-posix-time.h
+++ b/deps/v8/src/base/platform/platform-posix-time.h
@@ -15,7 +15,7 @@ class PosixDefaultTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time_ms) override;
double LocalTimeOffset(double time_ms, bool is_utc) override;
- ~PosixDefaultTimezoneCache() override {}
+ ~PosixDefaultTimezoneCache() override = default;
};
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index cb25196970..c93974bcfc 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -86,7 +86,7 @@ namespace base {
namespace {
// 0 is never a valid thread id.
-const pthread_t kNoThread = (pthread_t) 0;
+const pthread_t kNoThread = static_cast<pthread_t>(0);
bool g_hard_abort = false;
@@ -254,10 +254,6 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 46 bits of virtual addressing.
raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
-#elif V8_TARGET_ARCH_MIPS64
- // We allocate code in 256 MB aligned segments because of optimizations using
- // J instruction that require that all code is within a single 256 MB segment
- raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
@@ -267,6 +263,10 @@ void* OS::GetRandomMmapAddr() {
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1FFFF000;
+#elif V8_TARGET_ARCH_MIPS64
+ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -313,7 +313,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Unmap memory allocated before the aligned base address.
uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index 55861bc9ac..8cf5e54604 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -15,7 +15,7 @@ class PosixTimezoneCache : public TimezoneCache {
public:
double DaylightSavingsOffset(double time_ms) override;
void Clear() override {}
- ~PosixTimezoneCache() override {}
+ ~PosixTimezoneCache() override = default;
protected:
static const int msPerSecond = 1000;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 2e56ac5df1..11a008e6c6 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -822,7 +822,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
- uint8_t* aligned_base = RoundUp(base, alignment);
+ uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
@@ -843,7 +844,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(Free(base, padded_size));
- aligned_base = RoundUp(base, alignment);
+ aligned_base = reinterpret_cast<uint8_t*>(
+ RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 51b6014821..f9d01edf00 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -188,7 +188,7 @@ class V8_BASE_EXPORT OS {
class V8_BASE_EXPORT MemoryMappedFile {
public:
- virtual ~MemoryMappedFile() {}
+ virtual ~MemoryMappedFile() = default;
virtual void* memory() const = 0;
virtual size_t size() const = 0;
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 5950664523..a7e50f5880 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -91,7 +91,9 @@ void Semaphore::Signal() {
// This check may fail with <libc-2.21, which we use on the try bots, if the
// semaphore is destroyed while sem_post is still executed. A work around is
// to extend the lifetime of the semaphore.
- CHECK_EQ(0, result);
+ if (result != 0) {
+ FATAL("Error when signaling semaphore, errno: %d", errno);
+ }
}
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 161092ad8b..9e99166487 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -105,10 +105,7 @@ class V8_BASE_EXPORT TimeDelta final {
static TimeDelta FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
- TimeDelta& operator=(const TimeDelta& other) {
- delta_ = other.delta_;
- return *this;
- }
+ TimeDelta& operator=(const TimeDelta& other) = default;
// Computations with other deltas.
TimeDelta operator+(const TimeDelta& other) const {
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
new file mode 100644
index 0000000000..46ceca1857
--- /dev/null
+++ b/deps/v8/src/base/region-allocator.cc
@@ -0,0 +1,291 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/region-allocator.h"
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// If |free_size| < |region_size| * |kMaxLoadFactorForRandomization|, stop trying
+// to randomize region allocation.
+constexpr double kMaxLoadFactorForRandomization = 0.40;
+
+// Max number of attempts to allocate a page at a random address.
+constexpr int kMaxRandomizationAttempts = 3;
+
+RegionAllocator::RegionAllocator(Address memory_region_begin,
+ size_t memory_region_size, size_t page_size)
+ : whole_region_(memory_region_begin, memory_region_size, false),
+ region_size_in_pages_(size() / page_size),
+ max_load_for_randomization_(
+ static_cast<size_t>(size() * kMaxLoadFactorForRandomization)),
+ free_size_(0),
+ page_size_(page_size) {
+ CHECK_LT(begin(), end());
+ CHECK(base::bits::IsPowerOfTwo(page_size_));
+ CHECK(IsAligned(size(), page_size_));
+ CHECK(IsAligned(begin(), page_size_));
+
+ // Initial region.
+ Region* region = new Region(whole_region_);
+
+ all_regions_.insert(region);
+
+ FreeListAddRegion(region);
+}
+
+RegionAllocator::~RegionAllocator() {
+ for (Region* region : all_regions_) {
+ delete region;
+ }
+}
+
+RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion(
+ Address address) {
+ if (!whole_region_.contains(address)) return all_regions_.end();
+
+ Region key(address, 0, false);
+ AllRegionsSet::iterator iter = all_regions_.upper_bound(&key);
+ // Regions in |all_regions_| are compared by end() values and key's end()
+ // points exactly to the address we are querying, so the upper_bound will
+ // find the region whose |end()| is greater than the requested address.
+ DCHECK_NE(iter, all_regions_.end());
+ DCHECK((*iter)->contains(address));
+ return iter;
+}
+
+void RegionAllocator::FreeListAddRegion(Region* region) {
+ free_size_ += region->size();
+ free_regions_.insert(region);
+}
+
+RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
+ Region key(0, size, false);
+ auto iter = free_regions_.lower_bound(&key);
+ return iter == free_regions_.end() ? nullptr : *iter;
+}
+
+void RegionAllocator::FreeListRemoveRegion(Region* region) {
+ DCHECK(!region->is_used());
+ auto iter = free_regions_.find(region);
+ DCHECK_NE(iter, free_regions_.end());
+ DCHECK_EQ(region, *iter);
+ DCHECK_LE(region->size(), free_size_);
+ free_size_ -= region->size();
+ free_regions_.erase(iter);
+}
+
+RegionAllocator::Region* RegionAllocator::Split(Region* region,
+ size_t new_size) {
+ DCHECK(IsAligned(new_size, page_size_));
+ DCHECK_NE(new_size, 0);
+ DCHECK_GT(region->size(), new_size);
+
+ // Create a new region and put it into the lists after |region|.
+ bool used = region->is_used();
+ Region* new_region =
+ new Region(region->begin() + new_size, region->size() - new_size, used);
+ if (!used) {
+ // Remove region from the free list before updating its size.
+ FreeListRemoveRegion(region);
+ }
+ region->set_size(new_size);
+
+ all_regions_.insert(new_region);
+
+ if (!used) {
+ FreeListAddRegion(region);
+ FreeListAddRegion(new_region);
+ }
+ return new_region;
+}
+
+void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter,
+ AllRegionsSet::iterator next_iter) {
+ Region* prev = *prev_iter;
+ Region* next = *next_iter;
+ DCHECK_EQ(prev->end(), next->begin());
+ prev->set_size(prev->size() + next->size());
+
+ all_regions_.erase(next_iter); // prev_iter stays valid.
+
+ // The |next| region must already not be in the free list.
+ DCHECK_EQ(free_regions_.find(next), free_regions_.end());
+ delete next;
+}
+
+RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) {
+ DCHECK_NE(size, 0);
+ DCHECK(IsAligned(size, page_size_));
+
+ Region* region = FreeListFindRegion(size);
+ if (region == nullptr) return kAllocationFailure;
+
+ if (region->size() != size) {
+ Split(region, size);
+ }
+ DCHECK(IsAligned(region->begin(), page_size_));
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_is_used(true);
+ return region->begin();
+}
+
+RegionAllocator::Address RegionAllocator::AllocateRegion(
+ RandomNumberGenerator* rng, size_t size) {
+ if (free_size() >= max_load_for_randomization_) {
+ // There is enough free space for trying to randomize the address.
+ size_t random = 0;
+
+ for (int i = 0; i < kMaxRandomizationAttempts; i++) {
+ rng->NextBytes(&random, sizeof(random));
+ size_t random_offset = page_size_ * (random % region_size_in_pages_);
+ Address address = begin() + random_offset;
+ if (AllocateRegionAt(address, size)) {
+ return address;
+ }
+ }
+ // Fall back to free list allocation.
+ }
+ return AllocateRegion(size);
+}
+
+bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
+ DCHECK(IsAligned(requested_address, page_size_));
+ DCHECK_NE(size, 0);
+ DCHECK(IsAligned(size, page_size_));
+
+ Address requested_end = requested_address + size;
+ DCHECK_LE(requested_end, end());
+
+ Region* region;
+ {
+ AllRegionsSet::iterator region_iter = FindRegion(requested_address);
+ if (region_iter == all_regions_.end()) {
+ return false;
+ }
+ region = *region_iter;
+ }
+ if (region->is_used() || region->end() < requested_end) {
+ return false;
+ }
+ // Found free region that includes the requested one.
+ if (region->begin() != requested_address) {
+ // Split the region at the |requested_address| boundary.
+ size_t new_size = requested_address - region->begin();
+ DCHECK(IsAligned(new_size, page_size_));
+ region = Split(region, new_size);
+ }
+ if (region->end() != requested_end) {
+ // Split the region at the |requested_end| boundary.
+ Split(region, size);
+ }
+ DCHECK_EQ(region->begin(), requested_address);
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_is_used(true);
+ return true;
+}
+
+size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
+ DCHECK(IsAligned(new_size, page_size_));
+
+ AllRegionsSet::iterator region_iter = FindRegion(address);
+ if (region_iter == all_regions_.end()) {
+ return 0;
+ }
+ Region* region = *region_iter;
+ if (region->begin() != address || !region->is_used()) {
+ return 0;
+ }
+
+ // The region must not be in the free list.
+ DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end());
+
+ if (new_size > 0) {
+ region = Split(region, new_size);
+ ++region_iter;
+ }
+ size_t size = region->size();
+ region->set_is_used(false);
+
+ // Merge current region with the surrounding ones if they are free.
+ if (region->end() != whole_region_.end()) {
+ // There must be a range after the current one.
+ AllRegionsSet::iterator next_iter = std::next(region_iter);
+ DCHECK_NE(next_iter, all_regions_.end());
+ if (!(*next_iter)->is_used()) {
+ // |next| region object will be deleted during merge, remove it from
+ // the free list.
+ FreeListRemoveRegion(*next_iter);
+ Merge(region_iter, next_iter);
+ }
+ }
+ if (new_size == 0 && region->begin() != whole_region_.begin()) {
+ // There must be a range before the current one.
+ AllRegionsSet::iterator prev_iter = std::prev(region_iter);
+ DCHECK_NE(prev_iter, all_regions_.end());
+ if (!(*prev_iter)->is_used()) {
+ // |prev| region's size will change, we'll have to re-insert it into
+ // the proper place of the free list.
+ FreeListRemoveRegion(*prev_iter);
+ Merge(prev_iter, region_iter);
+ // |prev| region becomes the current region.
+ region_iter = prev_iter;
+ region = *region_iter;
+ }
+ }
+ FreeListAddRegion(region);
+ return size;
+}
+
+size_t RegionAllocator::CheckRegion(Address address) {
+ AllRegionsSet::iterator region_iter = FindRegion(address);
+ if (region_iter == all_regions_.end()) {
+ return 0;
+ }
+ Region* region = *region_iter;
+ if (region->begin() != address || !region->is_used()) {
+ return 0;
+ }
+ return region->size();
+}
+
+void RegionAllocator::Region::Print(std::ostream& os) const {
+ std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+ os << "[" << begin() << ", " << end() << "), size: " << size();
+ os << ", " << (is_used() ? "used" : "free");
+ os.flags(flags);
+}
+
+void RegionAllocator::Print(std::ostream& os) const {
+ std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+ os << "RegionAllocator: [" << begin() << ", " << end() << ")";
+ os << "\nsize: " << size();
+ os << "\nfree_size: " << free_size();
+ os << "\npage_size: " << page_size_;
+
+ os << "\nall regions: ";
+ for (const Region* region : all_regions_) {
+ os << "\n ";
+ region->Print(os);
+ }
+
+ os << "\nfree regions: ";
+ for (const Region* region : free_regions_) {
+ os << "\n ";
+ region->Print(os);
+ }
+ os << "\n";
+ os.flags(flags);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
new file mode 100644
index 0000000000..fb51472fa9
--- /dev/null
+++ b/deps/v8/src/base/region-allocator.h
@@ -0,0 +1,164 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_REGION_ALLOCATOR_H_
+#define V8_BASE_REGION_ALLOCATOR_H_
+
+#include <set>
+
+#include "src/base/address-region.h"
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace base {
+
+// Helper class for managing used/free regions within [address, address+size)
+// region. Minimum allocation unit is |page_size|. Requested allocation size
+// is rounded up to |page_size|.
+// The region allocation algorithm implements a best-fit-with-coalescing
+// strategy: it tries to find the smallest suitable free region upon allocation
+// and tries to merge the region with its neighbors upon freeing.
+//
+// This class does not perform any actual region reservation.
+// Not thread-safe.
+class V8_BASE_EXPORT RegionAllocator final {
+ public:
+ typedef uintptr_t Address;
+
+ static constexpr Address kAllocationFailure = static_cast<Address>(-1);
+
+ RegionAllocator(Address address, size_t size, size_t page_size);
+ ~RegionAllocator();
+
+ // Allocates region of |size| (must be |page_size|-aligned). Returns
+ // the address of the region on success or kAllocationFailure.
+ Address AllocateRegion(size_t size);
+ // Same as above but tries to randomize the region displacement.
+ Address AllocateRegion(RandomNumberGenerator* rng, size_t size);
+
+ // Allocates region of |size| at |requested_address| if it's free. Both the
+ // address and the size must be |page_size|-aligned. On success returns
+ // true.
+ // This kind of allocation is supposed to be used during the setup phase to
+ // mark certain regions as used or for randomizing region displacement.
+ bool AllocateRegionAt(Address requested_address, size_t size);
+
+ // Frees region at given |address|, returns the size of the region.
+ // There must be a used region starting at given address otherwise nothing
+ // will be freed and 0 will be returned.
+ size_t FreeRegion(Address address) { return TrimRegion(address, 0); }
+
+ // Decreases size of the previously allocated region at |address|, returns
+ // freed size. |new_size| must be |page_size|-aligned and
+ // less than or equal to current region's size. Setting new size to zero
+ // frees the region.
+ size_t TrimRegion(Address address, size_t new_size);
+
+ // If there is a used region starting at given address returns its size
+ // otherwise 0.
+ size_t CheckRegion(Address address);
+
+ Address begin() const { return whole_region_.begin(); }
+ Address end() const { return whole_region_.end(); }
+ size_t size() const { return whole_region_.size(); }
+
+ bool contains(Address address) const {
+ return whole_region_.contains(address);
+ }
+
+ bool contains(Address address, size_t size) const {
+ return whole_region_.contains(address, size);
+ }
+
+ // Total size of not yet acquired regions.
+ size_t free_size() const { return free_size_; }
+
+ // The alignment of the allocated region's addresses and granularity of
+ // the allocated region's sizes.
+ size_t page_size() const { return page_size_; }
+
+ void Print(std::ostream& os) const;
+
+ private:
+ class Region : public AddressRegion {
+ public:
+ Region(Address address, size_t size, bool is_used)
+ : AddressRegion(address, size), is_used_(is_used) {}
+
+ bool is_used() const { return is_used_; }
+ void set_is_used(bool used) { is_used_ = used; }
+
+ void Print(std::ostream& os) const;
+
+ private:
+ bool is_used_;
+ };
+
+ // The whole region.
+ const Region whole_region_;
+
+ // Number of |page_size_| in the whole region.
+ const size_t region_size_in_pages_;
+
+ // If the free size is less than this value, stop trying to randomize the
+ // allocation addresses.
+ const size_t max_load_for_randomization_;
+
+ // Size of all free regions.
+ size_t free_size_;
+
+ // Minimum region size. Must be a power of two.
+ const size_t page_size_;
+
+ struct AddressEndOrder {
+ bool operator()(const Region* a, const Region* b) const {
+ return a->end() < b->end();
+ }
+ };
+ // All regions ordered by addresses.
+ typedef std::set<Region*, AddressEndOrder> AllRegionsSet;
+ AllRegionsSet all_regions_;
+
+ struct SizeAddressOrder {
+ bool operator()(const Region* a, const Region* b) const {
+ if (a->size() != b->size()) return a->size() < b->size();
+ return a->begin() < b->begin();
+ }
+ };
+ // Free regions ordered by sizes and addresses.
+ std::set<Region*, SizeAddressOrder> free_regions_;
+
+ // Returns an iterator to the region containing the given address, or
+ // all_regions_.end() if there is none.
+ AllRegionsSet::iterator FindRegion(Address address);
+
+ // Adds given region to the set of free regions.
+ void FreeListAddRegion(Region* region);
+
+ // Finds best-fit free region for given size.
+ Region* FreeListFindRegion(size_t size);
+
+ // Removes given region from the set of free regions.
+ void FreeListRemoveRegion(Region* region);
+
+ // Splits given |region| into two: one of |new_size| size and a new one
+ // having the rest. The new region is returned.
+ Region* Split(Region* region, size_t new_size);
+
+ // For two coalescing regions merges |next| to |prev| and deletes |next|.
+ void Merge(AllRegionsSet::iterator prev_iter,
+ AllRegionsSet::iterator next_iter);
+
+ FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
+ FRIEND_TEST(RegionAllocatorTest, Fragmentation);
+ FRIEND_TEST(RegionAllocatorTest, FindRegion);
+ FRIEND_TEST(RegionAllocatorTest, Contains);
+
+ DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_REGION_ALLOCATOR_H_
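
A minimal bookkeeping-only sketch of the RegionAllocator API declared above (addresses and sizes are hypothetical; no memory is actually reserved):

    #include <cassert>

    #include "src/base/region-allocator.h"

    void Example() {
      using Allocator = v8::base::RegionAllocator;
      constexpr Allocator::Address kBase = 0x10000000;  // page-aligned base
      constexpr size_t kSize = 1 * 1024 * 1024;         // 1 MB range
      constexpr size_t kPageSize = 4 * 1024;

      Allocator allocator(kBase, kSize, kPageSize);
      Allocator::Address a = allocator.AllocateRegion(8 * kPageSize);
      Allocator::Address b = allocator.AllocateRegion(4 * kPageSize);
      assert(a != Allocator::kAllocationFailure);
      assert(b != Allocator::kAllocationFailure);

      // Freeing returns the region's size; the freed range is merged with any
      // free neighbors and can be handed out again later.
      assert(allocator.FreeRegion(a) == 8 * kPageSize);
      assert(allocator.CheckRegion(b) == 4 * kPageSize);
    }
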
diff --git a/deps/v8/src/base/safe_math.h b/deps/v8/src/base/safe_math.h
index 62a2f723f2..700bc3387f 100644
--- a/deps/v8/src/base/safe_math.h
+++ b/deps/v8/src/base/safe_math.h
@@ -49,7 +49,7 @@ class CheckedNumeric {
public:
typedef T type;
- CheckedNumeric() {}
+ CheckedNumeric() = default;
// Copy constructor.
template <typename Src>
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
new file mode 100644
index 0000000000..d54bcb8f70
--- /dev/null
+++ b/deps/v8/src/base/threaded-list.h
@@ -0,0 +1,267 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_THREADED_LIST_H_
+#define V8_BASE_THREADED_LIST_H_
+
+#include <iterator>
+
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <typename T>
+struct ThreadedListTraits {
+ static T** next(T* t) { return t->next(); }
+};
+
+// Represents an intrusive linked list that is threaded through its nodes.
+// Entries in the list are pointers to nodes. By default, nodes need to have a
+// T** next() method that returns the location where the next value is stored.
+// The default can be overridden by providing a custom traits class.
+template <typename T, typename BaseClass,
+ typename TLTraits = ThreadedListTraits<T>>
+class ThreadedListBase final : public BaseClass {
+ public:
+ ThreadedListBase() : head_(nullptr), tail_(&head_) {}
+ void Add(T* v) {
+ DCHECK_NULL(*tail_);
+ DCHECK_NULL(*TLTraits::next(v));
+ *tail_ = v;
+ tail_ = TLTraits::next(v);
+ }
+
+ void AddFront(T* v) {
+ DCHECK_NULL(*TLTraits::next(v));
+ DCHECK_NOT_NULL(v);
+ T** const next = TLTraits::next(v);
+
+ *next = head_;
+ if (head_ == nullptr) tail_ = next;
+ head_ = v;
+ }
+
+ // Reinitializes the head to a new node; this costs O(n).
+ void ReinitializeHead(T* v) {
+ head_ = v;
+ T* current = v;
+ if (current != nullptr) { // Find tail
+ T* tmp;
+ while ((tmp = *TLTraits::next(current))) {
+ current = tmp;
+ }
+ tail_ = TLTraits::next(current);
+ } else {
+ tail_ = &head_;
+ }
+ }
+
+ void DropHead() {
+ DCHECK_NOT_NULL(head_);
+
+ T* old_head = head_;
+ head_ = *TLTraits::next(head_);
+ if (head_ == nullptr) tail_ = &head_;
+ *TLTraits::next(old_head) = nullptr;
+ }
+
+ void Append(ThreadedListBase&& list) {
+ *tail_ = list.head_;
+ tail_ = list.tail_;
+ list.Clear();
+ }
+
+ void Prepend(ThreadedListBase&& list) {
+ if (list.head_ == nullptr) return;
+
+ T* new_head = list.head_;
+ *list.tail_ = head_;
+ if (head_ == nullptr) {
+ tail_ = list.tail_;
+ }
+ head_ = new_head;
+ list.Clear();
+ }
+
+ void Clear() {
+ head_ = nullptr;
+ tail_ = &head_;
+ }
+
+ ThreadedListBase& operator=(ThreadedListBase&& other) V8_NOEXCEPT {
+ head_ = other.head_;
+ tail_ = other.head_ ? other.tail_ : &head_;
+#ifdef DEBUG
+ other.Clear();
+#endif
+ return *this;
+ }
+
+ ThreadedListBase(ThreadedListBase&& other) V8_NOEXCEPT
+ : head_(other.head_),
+ tail_(other.head_ ? other.tail_ : &head_) {
+#ifdef DEBUG
+ other.Clear();
+#endif
+ }
+
+ bool Remove(T* v) {
+ T* current = first();
+ if (current == v) {
+ DropHead();
+ return true;
+ }
+
+ while (current != nullptr) {
+ T* next = *TLTraits::next(current);
+ if (next == v) {
+ *TLTraits::next(current) = *TLTraits::next(next);
+ *TLTraits::next(next) = nullptr;
+
+ if (TLTraits::next(next) == tail_) {
+ tail_ = TLTraits::next(current);
+ }
+ return true;
+ }
+ current = next;
+ }
+ return false;
+ }
+
+ class Iterator final {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = T*;
+ using reference = value_type;
+ using pointer = value_type*;
+
+ public:
+ Iterator& operator++() {
+ entry_ = TLTraits::next(*entry_);
+ return *this;
+ }
+ bool operator==(const Iterator& other) const {
+ return entry_ == other.entry_;
+ }
+ bool operator!=(const Iterator& other) const {
+ return entry_ != other.entry_;
+ }
+ T* operator*() { return *entry_; }
+ T* operator->() { return *entry_; }
+ Iterator& operator=(T* entry) {
+ T* next = *TLTraits::next(*entry_);
+ *TLTraits::next(entry) = next;
+ *entry_ = entry;
+ return *this;
+ }
+
+ private:
+ explicit Iterator(T** entry) : entry_(entry) {}
+
+ T** entry_;
+
+ friend class ThreadedListBase;
+ };
+
+ class ConstIterator final {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = T*;
+ using reference = const value_type;
+ using pointer = const value_type*;
+
+ public:
+ ConstIterator& operator++() {
+ entry_ = TLTraits::next(*entry_);
+ return *this;
+ }
+ bool operator==(const ConstIterator& other) const {
+ return entry_ == other.entry_;
+ }
+ bool operator!=(const ConstIterator& other) const {
+ return entry_ != other.entry_;
+ }
+ const T* operator*() const { return *entry_; }
+
+ private:
+ explicit ConstIterator(T* const* entry) : entry_(entry) {}
+
+ T* const* entry_;
+
+ friend class ThreadedListBase;
+ };
+
+ Iterator begin() { return Iterator(&head_); }
+ Iterator end() { return Iterator(tail_); }
+
+ ConstIterator begin() const { return ConstIterator(&head_); }
+ ConstIterator end() const { return ConstIterator(tail_); }
+
+ // Rewinds the list's tail to the reset point, i.e., cutting off the rest of
+ // the list, including the reset_point.
+ void Rewind(Iterator reset_point) {
+ tail_ = reset_point.entry_;
+ *tail_ = nullptr;
+ }
+
+ // Moves the tail of the from_list, starting at the from_location, to the end
+ // of this list.
+ void MoveTail(ThreadedListBase* from_list, Iterator from_location) {
+ if (from_list->end() != from_location) {
+ DCHECK_NULL(*tail_);
+ *tail_ = *from_location;
+ tail_ = from_list->tail_;
+ from_list->Rewind(from_location);
+ }
+ }
+
+ bool is_empty() const { return head_ == nullptr; }
+
+ T* first() const { return head_; }
+
+ // Slow. For testing purposes.
+ int LengthForTest() {
+ int result = 0;
+ for (Iterator t = begin(); t != end(); ++t) ++result;
+ return result;
+ }
+
+ T* AtForTest(int i) {
+ Iterator t = begin();
+ while (i-- > 0) ++t;
+ return *t;
+ }
+
+ bool Verify() {
+ T* last = this->first();
+ if (last == nullptr) {
+ CHECK_EQ(&head_, tail_);
+ } else {
+ while (*TLTraits::next(last) != nullptr) {
+ last = *TLTraits::next(last);
+ }
+ CHECK_EQ(TLTraits::next(last), tail_);
+ }
+ return true;
+ }
+
+ private:
+ T* head_;
+ T** tail_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
+};
+
+struct EmptyBase {};
+
+template <typename T, typename TLTraits = ThreadedListTraits<T>>
+using ThreadedList = ThreadedListBase<T, EmptyBase, TLTraits>;
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_THREADED_LIST_H_
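
A small sketch of the intrusive-list contract: a node type that satisfies the default ThreadedListTraits by exposing the address of its own next pointer (the Node type and Example function are hypothetical):

    #include <cassert>

    #include "src/base/threaded-list.h"

    struct Node {
      explicit Node(int v) : value(v) {}
      int value;
      Node* next_ = nullptr;
      // Default traits require T** next() returning where the next value lives.
      Node** next() { return &next_; }
    };

    void Example() {
      Node a(1), b(2), c(3);
      v8::base::ThreadedList<Node> list;
      list.Add(&a);       // a
      list.Add(&b);       // a -> b
      list.AddFront(&c);  // c -> a -> b
      assert(list.first() == &c);
      assert(list.LengthForTest() == 3);
      list.Remove(&a);    // c -> b; the node's next pointer is reset
      assert(list.AtForTest(1) == &b);
    }
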
diff --git a/deps/v8/src/base/timezone-cache.h b/deps/v8/src/base/timezone-cache.h
index 96ad7bb41f..3d97eee126 100644
--- a/deps/v8/src/base/timezone-cache.h
+++ b/deps/v8/src/base/timezone-cache.h
@@ -27,7 +27,7 @@ class TimezoneCache {
virtual void Clear() = 0;
// Called when tearing down the isolate
- virtual ~TimezoneCache() {}
+ virtual ~TimezoneCache() = default;
};
} // namespace base
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index afe5a1f098..a3313f4e88 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -99,7 +99,7 @@ int RandomNumberGenerator::NextInt(int max) {
double RandomNumberGenerator::NextDouble() {
XorShift128(&state0_, &state1_);
- return ToDouble(state0_, state1_);
+ return ToDouble(state0_);
}
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index b4b67970c7..45ec259305 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -108,11 +108,10 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
int64_t initial_seed() const { return initial_seed_; }
// Static and exposed for external use.
- static inline double ToDouble(uint64_t state0, uint64_t state1) {
+ static inline double ToDouble(uint64_t state0) {
// Exponent for double values for [1.0 .. 2.0)
static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
- static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
- uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
+ uint64_t random = (state0 >> 12) | kExponentBits;
return bit_cast<double>(random) - 1;
}
@@ -128,6 +127,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
*state1 = s1;
}
+ static uint64_t MurmurHash3(uint64_t);
+
private:
static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
static const int64_t kAddend = 0xb;
@@ -135,8 +136,6 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
int Next(int bits) V8_WARN_UNUSED_RESULT;
- static uint64_t MurmurHash3(uint64_t);
-
int64_t initial_seed_;
uint64_t state0_;
uint64_t state1_;
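
The reworked ToDouble takes the top 52 bits of state0 as the mantissa under an exponent of 1.0, producing a double in [1.0, 2.0), and then subtracts 1 to land in [0.0, 1.0). A standalone illustration of the bit pattern (ToDoubleSketch is hypothetical and uses memcpy in place of base::bit_cast):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double ToDoubleSketch(uint64_t state0) {
      // 0x3FF0... is the exponent of 1.0; the shifted state provides the
      // 52 mantissa bits.
      const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
      uint64_t random = (state0 >> 12) | kExponentBits;
      double d;
      std::memcpy(&d, &random, sizeof(d));
      return d - 1;
    }

    int main() {
      assert(ToDoubleSketch(0) == 0.0);           // all-zero mantissa -> 0.0
      double x = ToDoubleSketch(~uint64_t{0});    // all-one mantissa
      assert(x >= 0.0 && x < 1.0);
      return 0;
    }
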