aboutsummaryrefslogtreecommitdiff
path: root/deps/v8/src/base
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2018-01-24 20:16:06 +0100
committerMyles Borins <mylesborins@google.com>2018-01-24 15:02:20 -0800
commit4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/base
parentfa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
downloadandroid-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.gz
android-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.bz2
android-node-v8-4c4af643e5042d615a60c6bbc05aee9d81b903e5.zip
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Myles Borins <myles.borins@gmail.com> Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/base')
-rw-r--r--deps/v8/src/base/bits.cc4
-rw-r--r--deps/v8/src/base/bits.h187
-rw-r--r--deps/v8/src/base/cpu.cc84
-rw-r--r--deps/v8/src/base/debug/stack_trace.cc2
-rw-r--r--deps/v8/src/base/debug/stack_trace_android.cc2
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc28
-rw-r--r--deps/v8/src/base/debug/stack_trace_win.cc16
-rw-r--r--deps/v8/src/base/division-by-constant.cc2
-rw-r--r--deps/v8/src/base/logging.h21
-rw-r--r--deps/v8/src/base/once.h3
-rw-r--r--deps/v8/src/base/platform/OWNERS6
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc2
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h4
-rw-r--r--deps/v8/src/base/platform/mutex.cc2
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc106
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc180
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc104
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc104
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc116
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc113
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc111
-rw-r--r--deps/v8/src/base/platform/platform-posix-time.cc2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc350
-rw-r--r--deps/v8/src/base/platform/platform-posix.h2
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc105
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc104
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc366
-rw-r--r--deps/v8/src/base/platform/platform.h92
-rw-r--r--deps/v8/src/base/platform/semaphore.cc8
-rw-r--r--deps/v8/src/base/platform/time.cc18
-rw-r--r--deps/v8/src/base/platform/time.h2
-rw-r--r--deps/v8/src/base/sys-info.cc8
-rw-r--r--deps/v8/src/base/template-utils.h43
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc87
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h20
35 files changed, 976 insertions, 1428 deletions
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
index 049dc4a1b1..fedbdb2d2d 100644
--- a/deps/v8/src/base/bits.cc
+++ b/deps/v8/src/base/bits.cc
@@ -18,7 +18,7 @@ uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
if (value) --value;
// Use computation based on leading zeros if we have compiler support for that.
#if V8_HAS_BUILTIN_CLZ || V8_CC_MSVC
- return 1u << (32 - CountLeadingZeros32(value));
+ return 1u << (32 - CountLeadingZeros(value));
#else
value |= value >> 1;
value |= value >> 2;
@@ -34,7 +34,7 @@ uint64_t RoundUpToPowerOfTwo64(uint64_t value) {
if (value) --value;
// Use computation based on leading zeros if we have compiler support for that.
#if V8_HAS_BUILTIN_CLZ
- return uint64_t{1} << (64 - CountLeadingZeros64(value));
+ return uint64_t{1} << (64 - CountLeadingZeros(value));
#else
value |= value >> 1;
value |= value >> 2;
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 504be0370a..731a7181d7 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -27,98 +27,32 @@ class CheckedNumeric;
namespace bits {
-// Define overloaded |Name| for |Name32| and |Name64|, depending on the size of
-// the given value.
-//
-// The overloads are only defined for input types of size 4 and 8, respectively,
-// using enable_if and SFINAE to disable them otherwise. enable_if<bool,
-// typename> only has a "type" member if the first parameter is true, in which
-// case "type" is a typedef to the second member (here, set to "unsigned").
-// Otherwise, enable_if::type doesn't exist, making the function signature
-// invalid, and so the entire function is thrown away (without an error) due to
-// SFINAE.
-//
-// Not that we cannot simply check sizeof(T) using an if statement, as we need
-// both branches of the if to be syntactically valid even if one of the branches
-// is dead.
-#define DEFINE_32_64_OVERLOADS(Name) \
- template <typename T> \
- inline typename std::enable_if<sizeof(T) == 4, unsigned>::type Name( \
- T value) { \
- return Name##32(value); \
- } \
- \
- template <typename T> \
- inline typename std::enable_if<sizeof(T) == 8, unsigned>::type Name( \
- T value) { \
- return Name##64(value); \
- }
-
-// CountPopulation32(value) returns the number of bits set in |value|.
-inline unsigned CountPopulation32(uint32_t value) {
+// CountPopulation(value) returns the number of bits set in |value|.
+template <typename T>
+constexpr inline
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountPopulation(T value) {
#if V8_HAS_BUILTIN_POPCOUNT
- return __builtin_popcount(value);
+ return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
+ : __builtin_popcount(static_cast<uint32_t>(value));
#else
- value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+ constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
+ 0x0f0f0f0f0f0f0f0f, 0x00ff00ff00ff00ff,
+ 0x0000ffff0000ffff, 0x00000000ffffffff};
+ value = ((value >> 1) & mask[0]) + (value & mask[0]);
+ value = ((value >> 2) & mask[1]) + (value & mask[1]);
+ value = ((value >> 4) & mask[2]) + (value & mask[2]);
+ if (sizeof(T) > 1)
+ value = ((value >> (sizeof(T) > 1 ? 8 : 0)) & mask[3]) + (value & mask[3]);
+ if (sizeof(T) > 2)
+ value = ((value >> (sizeof(T) > 2 ? 16 : 0)) & mask[4]) + (value & mask[4]);
+ if (sizeof(T) > 4)
+ value = ((value >> (sizeof(T) > 4 ? 32 : 0)) & mask[5]) + (value & mask[5]);
return static_cast<unsigned>(value);
#endif
}
-
-// CountPopulation64(value) returns the number of bits set in |value|.
-inline unsigned CountPopulation64(uint64_t value) {
-#if V8_HAS_BUILTIN_POPCOUNT
- return __builtin_popcountll(value);
-#else
- return CountPopulation32(static_cast<uint32_t>(value)) +
- CountPopulation32(static_cast<uint32_t>(value >> 32));
-#endif
-}
-
-DEFINE_32_64_OVERLOADS(CountPopulation)
-
-// CountLeadingZeros32(value) returns the number of zero bits following the most
-// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
-inline unsigned CountLeadingZeros32(uint32_t value) {
-#if V8_HAS_BUILTIN_CLZ
- return value ? __builtin_clz(value) : 32;
-#elif V8_CC_MSVC
- unsigned long result; // NOLINT(runtime/int)
- if (!_BitScanReverse(&result, value)) return 32;
- return static_cast<unsigned>(31 - result);
-#else
- value = value | (value >> 1);
- value = value | (value >> 2);
- value = value | (value >> 4);
- value = value | (value >> 8);
- value = value | (value >> 16);
- return CountPopulation32(~value);
-#endif
-}
-
-
-// CountLeadingZeros64(value) returns the number of zero bits following the most
-// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 64.
-inline unsigned CountLeadingZeros64(uint64_t value) {
-#if V8_HAS_BUILTIN_CLZ
- return value ? __builtin_clzll(value) : 64;
-#else
- value = value | (value >> 1);
- value = value | (value >> 2);
- value = value | (value >> 4);
- value = value | (value >> 8);
- value = value | (value >> 16);
- value = value | (value >> 32);
- return CountPopulation64(~value);
-#endif
-}
-
-DEFINE_32_64_OVERLOADS(CountLeadingZeros)
-
// ReverseBits(value) returns |value| in reverse bit order.
template <typename T>
T ReverseBits(T value) {
@@ -132,46 +66,73 @@ T ReverseBits(T value) {
return result;
}
-// CountTrailingZeros32(value) returns the number of zero bits preceding the
-// least significant 1 bit in |value| if |value| is non-zero, otherwise it
-// returns 32.
-inline unsigned CountTrailingZeros32(uint32_t value) {
-#if V8_HAS_BUILTIN_CTZ
- return value ? __builtin_ctz(value) : 32;
-#elif V8_CC_MSVC
- unsigned long result; // NOLINT(runtime/int)
- if (!_BitScanForward(&result, value)) return 32;
- return static_cast<unsigned>(result);
+// CountLeadingZeros(value) returns the number of zero bits following the most
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
+// {sizeof(T) * 8}.
+template <typename T, unsigned bits = sizeof(T) * 8>
+inline constexpr
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountLeadingZeros(T value) {
+ static_assert(bits > 0, "invalid instantiation");
+#if V8_HAS_BUILTIN_CLZ
+ return value == 0
+ ? bits
+ : bits == 64
+ ? __builtin_clzll(static_cast<uint64_t>(value))
+ : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
#else
- if (value == 0) return 32;
- unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count) {
- }
- return count;
+ // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
+ // Jr.), figures 5-11 and 5-12.
+ if (bits == 1) return static_cast<unsigned>(value) ^ 1;
+ T upper_half = value >> (bits / 2);
+ T next_value = upper_half != 0 ? upper_half : value;
+ unsigned add = upper_half != 0 ? 0 : bits / 2;
+ constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
+ return CountLeadingZeros<T, next_bits>(next_value) + add;
#endif
}
+inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
+ return CountLeadingZeros(value);
+}
+inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
+ return CountLeadingZeros(value);
+}
-// CountTrailingZeros64(value) returns the number of zero bits preceding the
+// CountTrailingZeros(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
-// returns 64.
-inline unsigned CountTrailingZeros64(uint64_t value) {
+// returns {sizeof(T) * 8}.
+template <typename T, unsigned bits = sizeof(T) * 8>
+inline constexpr
+ typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountTrailingZeros(T value) {
#if V8_HAS_BUILTIN_CTZ
- return value ? __builtin_ctzll(value) : 64;
+ return value == 0 ? bits
+ : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
+ : __builtin_ctz(static_cast<uint32_t>(value));
#else
- if (value == 0) return 64;
- unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count) {
- }
- return count;
+ // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
+ // chapter 5-4. On x64, since is faster than counting in a loop and faster
+ // than doing binary search.
+ using U = typename std::make_unsigned<T>::type;
+ U u = value;
+ return CountPopulation(static_cast<U>(~u & (u - 1u)));
#endif
}
-DEFINE_32_64_OVERLOADS(CountTrailingZeros)
+inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
+ return CountTrailingZeros(value);
+}
+inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
+ return CountTrailingZeros(value);
+}
// Returns true iff |value| is a power of 2.
template <typename T,
- typename = typename std::enable_if<std::is_integral<T>::value>::type>
+ typename = typename std::enable_if<std::is_integral<T>::value ||
+ std::is_enum<T>::value>::type>
constexpr inline bool IsPowerOfTwo(T value) {
return value > 0 && (value & (value - 1)) == 0;
}
@@ -338,8 +299,6 @@ V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// checks and returns the result.
V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
-#undef DEFINE_32_64_OVERLOADS
-
} // namespace bits
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index abbb824b6d..f449612e6a 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -75,8 +75,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
- || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#if V8_OS_LINUX
@@ -116,7 +115,7 @@ static uint32_t ReadELFHWCaps() {
#else
// Read the ELF HWCAP flags by parsing /proc/self/auxv.
FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
struct { uint32_t tag; uint32_t value; } entry;
for (;;) {
size_t n = fread(&entry, sizeof(entry), 1, fp);
@@ -176,7 +175,7 @@ int __detect_mips_arch_revision(void) {
// Fall-back to the least common denominator which is mips32 revision 1.
return result ? 1 : 6;
}
-#endif
+#endif // V8_HOST_ARCH_MIPS
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo final {
@@ -187,7 +186,7 @@ class CPUInfo final {
// when using fseek(0, SEEK_END) + ftell(). Nor can the be mmap()-ed.
static const char PATHNAME[] = "/proc/cpuinfo";
FILE* fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
for (;;) {
char buffer[256];
size_t n = fread(buffer, 1, sizeof(buffer), fp);
@@ -202,7 +201,7 @@ class CPUInfo final {
// Read the contents of the cpuinfo file.
data_ = new char[datalen_ + 1];
fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
for (size_t offset = 0; offset < datalen_; ) {
size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
if (n == 0) {
@@ -224,17 +223,17 @@ class CPUInfo final {
// Extract the content of a the first occurrence of a given field in
// the content of the cpuinfo file and return it as a heap-allocated
// string that must be freed by the caller using delete[].
- // Return NULL if not found.
+ // Return nullptr if not found.
char* ExtractField(const char* field) const {
- DCHECK(field != NULL);
+ DCHECK_NOT_NULL(field);
// Look for first field occurrence, and ensure it starts the line.
size_t fieldlen = strlen(field);
char* p = data_;
for (;;) {
p = strstr(p, field);
- if (p == NULL) {
- return NULL;
+ if (p == nullptr) {
+ return nullptr;
}
if (p == data_ || p[-1] == '\n') {
break;
@@ -244,21 +243,21 @@ class CPUInfo final {
// Skip to the first colon followed by a space.
p = strchr(p + fieldlen, ':');
- if (p == NULL || !isspace(p[1])) {
- return NULL;
+ if (p == nullptr || !isspace(p[1])) {
+ return nullptr;
}
p += 2;
// Find the end of the line.
char* q = strchr(p, '\n');
- if (q == NULL) {
+ if (q == nullptr) {
q = data_ + datalen_;
}
// Copy the line into a heap-allocated buffer.
size_t len = q - p;
char* result = new char[len + 1];
- if (result != NULL) {
+ if (result != nullptr) {
memcpy(result, p, len);
result[len] = '\0';
}
@@ -270,13 +269,11 @@ class CPUInfo final {
size_t datalen_;
};
-#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
-
// Checks that a space-separated list of items contains one given 'item'.
static bool HasListItem(const char* list, const char* item) {
ssize_t item_len = strlen(item);
const char* p = list;
- if (p != NULL) {
+ if (p != nullptr) {
while (*p != '\0') {
// Skip whitespace.
while (isspace(*p)) ++p;
@@ -296,11 +293,9 @@ static bool HasListItem(const char* list, const char* item) {
return false;
}
-#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
-
#endif // V8_OS_LINUX
-#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
CPU::CPU()
: stepping_(0),
@@ -432,7 +427,7 @@ CPU::CPU()
// Extract implementor from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
+ if (implementer != nullptr) {
char* end;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
@@ -442,7 +437,7 @@ CPU::CPU()
}
char* variant = cpu_info.ExtractField("CPU variant");
- if (variant != NULL) {
+ if (variant != nullptr) {
char* end;
variant_ = strtol(variant, &end, 0);
if (end == variant) {
@@ -453,7 +448,7 @@ CPU::CPU()
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
+ if (part != nullptr) {
char* end;
part_ = strtol(part, &end, 0);
if (end == part) {
@@ -469,7 +464,7 @@ CPU::CPU()
// $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
// same file.
char* architecture = cpu_info.ExtractField("CPU architecture");
- if (architecture != NULL) {
+ if (architecture != nullptr) {
char* end;
architecture_ = strtol(architecture, &end, 10);
if (end == architecture) {
@@ -572,7 +567,7 @@ CPU::CPU()
// QNX doesn't say if Thumb2 is available.
// Assume false for the architectures older than ARMv7.
}
- DCHECK(architecture_ >= 6);
+ DCHECK_GE(architecture_, 6);
has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
has_vfp_ = has_fpu_;
if (cpu_flags & ARM_CPU_FLAG_NEON) {
@@ -606,49 +601,16 @@ CPU::CPU()
#endif
#elif V8_HOST_ARCH_ARM64
-
- CPUInfo cpu_info;
-
- // Extract implementor from the "CPU implementer" field.
- char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
- char* end;
- implementer_ = static_cast<int>(strtol(implementer, &end, 0));
- if (end == implementer) {
- implementer_ = 0;
- }
- delete[] implementer;
- }
-
- char* variant = cpu_info.ExtractField("CPU variant");
- if (variant != NULL) {
- char* end;
- variant_ = static_cast<int>(strtol(variant, &end, 0));
- if (end == variant) {
- variant_ = -1;
- }
- delete[] variant;
- }
-
- // Extract part number from the "CPU part" field.
- char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
- char* end;
- part_ = static_cast<int>(strtol(part, &end, 0));
- if (end == part) {
- part_ = 0;
- }
- delete[] part;
- }
+// Implementer, variant and part are currently unused under ARM64.
#elif V8_HOST_ARCH_PPC
#ifndef USE_SIMULATOR
#if V8_OS_LINUX
// Read processor info from /proc/self/auxv.
- char* auxv_cpu_type = NULL;
+ char* auxv_cpu_type = nullptr;
FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != NULL) {
+ if (fp != nullptr) {
#if V8_TARGET_ARCH_PPC64
Elf64_auxv_t entry;
#else
diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc
index 0a7a3f9ab9..2a3fb87a19 100644
--- a/deps/v8/src/base/debug/stack_trace.cc
+++ b/deps/v8/src/base/debug/stack_trace.cc
@@ -26,7 +26,7 @@ StackTrace::~StackTrace() {}
const void* const* StackTrace::Addresses(size_t* count) const {
*count = count_;
if (count_) return trace_;
- return NULL;
+ return nullptr;
}
std::string StackTrace::ToString() const {
diff --git a/deps/v8/src/base/debug/stack_trace_android.cc b/deps/v8/src/base/debug/stack_trace_android.cc
index e1d5fd2e57..16fbf9890c 100644
--- a/deps/v8/src/base/debug/stack_trace_android.cc
+++ b/deps/v8/src/base/debug/stack_trace_android.cc
@@ -63,7 +63,7 @@ bool EnableInProcessStackDumping() {
memset(&action, 0, sizeof(action));
action.sa_handler = SIG_IGN;
sigemptyset(&action.sa_mask);
- return (sigaction(SIGPIPE, &action, NULL) == 0);
+ return (sigaction(SIGPIPE, &action, nullptr) == 0);
}
void DisableSignalStackDump() {
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index 87c0a73d19..67f86c634f 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -51,8 +51,8 @@ namespace internal {
// POSIX doesn't define any async-signal safe function for converting
// an integer to ASCII. We'll have to define our own version.
// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
-// conversion was successful or NULL otherwise. It never writes more than "sz"
-// bytes. Output will be truncated as needed, and a NUL character is always
+// conversion was successful or nullptr otherwise. It never writes more than
+// "sz" bytes. Output will be truncated as needed, and a NUL character is always
// appended.
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
@@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled_symbol(
- abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+ abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@@ -334,7 +334,7 @@ bool EnableInProcessStackDumping() {
memset(&sigpipe_action, 0, sizeof(sigpipe_action));
sigpipe_action.sa_handler = SIG_IGN;
sigemptyset(&sigpipe_action.sa_mask);
- bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+ bool success = (sigaction(SIGPIPE, &sigpipe_action, nullptr) == 0);
// Avoid hangs during backtrace initialization, see above.
WarmUpBacktrace();
@@ -345,12 +345,12 @@ bool EnableInProcessStackDumping() {
action.sa_sigaction = &StackDumpSignalHandler;
sigemptyset(&action.sa_mask);
- success &= (sigaction(SIGILL, &action, NULL) == 0);
- success &= (sigaction(SIGABRT, &action, NULL) == 0);
- success &= (sigaction(SIGFPE, &action, NULL) == 0);
- success &= (sigaction(SIGBUS, &action, NULL) == 0);
- success &= (sigaction(SIGSEGV, &action, NULL) == 0);
- success &= (sigaction(SIGSYS, &action, NULL) == 0);
+ success &= (sigaction(SIGILL, &action, nullptr) == 0);
+ success &= (sigaction(SIGABRT, &action, nullptr) == 0);
+ success &= (sigaction(SIGFPE, &action, nullptr) == 0);
+ success &= (sigaction(SIGBUS, &action, nullptr) == 0);
+ success &= (sigaction(SIGSEGV, &action, nullptr) == 0);
+ success &= (sigaction(SIGSYS, &action, nullptr) == 0);
dump_stack_in_signal_handler = true;
@@ -397,11 +397,11 @@ namespace internal {
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write at least one NUL byte.
size_t n = 1;
- if (n > sz) return NULL;
+ if (n > sz) return nullptr;
if (base < 2 || base > 16) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
char* start = buf;
@@ -416,7 +416,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write the '-' character.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
*start++ = '-';
}
@@ -428,7 +428,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
// Output the next digit.
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 64e6309122..7a7e4f5168 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -24,9 +24,9 @@ namespace debug {
namespace {
-// Previous unhandled filter. Will be called if not NULL when we intercept an
+// Previous unhandled filter. Will be called if not nullptr when we intercept an
// exception. Only used in unit tests.
-LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = NULL;
+LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = nullptr;
bool g_dump_stack_in_signal_handler = true;
bool g_initialized_symbols = false;
@@ -43,7 +43,7 @@ long WINAPI StackDumpExceptionFilter(EXCEPTION_POINTERS* info) { // NOLINT
}
void GetExePath(wchar_t* path_out) {
- GetModuleFileName(NULL, path_out, MAX_PATH);
+ GetModuleFileName(nullptr, path_out, MAX_PATH);
path_out[MAX_PATH - 1] = L'\0';
PathRemoveFileSpec(path_out);
}
@@ -54,7 +54,7 @@ bool InitializeSymbols() {
// Defer symbol load until they're needed, use undecorated names, and get line
// numbers.
SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
- if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+ if (!SymInitialize(GetCurrentProcess(), nullptr, TRUE)) {
g_init_error = GetLastError();
// TODO(awong): Handle error: SymInitialize can fail with
// ERROR_INVALID_PARAMETER.
@@ -174,7 +174,7 @@ void DisableSignalStackDump() {
StackTrace::StackTrace() {
// When walking our own stack, use CaptureStackBackTrace().
- count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, NULL);
+ count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, nullptr);
}
#if defined(V8_CC_MSVC)
@@ -216,13 +216,13 @@ void StackTrace::InitTrace(const CONTEXT* context_record) {
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &context_copy, NULL,
- &SymFunctionTableAccess64, &SymGetModuleBase64, NULL) &&
+ &stack_frame, &context_copy, nullptr,
+ &SymFunctionTableAccess64, &SymGetModuleBase64, nullptr) &&
count_ < arraysize(trace_)) {
trace_[count_++] = reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
}
- for (size_t i = count_; i < arraysize(trace_); ++i) trace_[i] = NULL;
+ for (size_t i = count_; i < arraysize(trace_); ++i) trace_[i] = nullptr;
}
void StackTrace::Print() const { OutputToStream(&std::cerr); }
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
index 03d198e9bf..4e0900fa24 100644
--- a/deps/v8/src/base/division-by-constant.cc
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -53,7 +53,7 @@ template <class T>
MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
unsigned leading_zeros) {
STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
- DCHECK(d != 0);
+ DCHECK_NE(d, 0);
const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
const T ones = ~static_cast<T>(0) >> leading_zeros;
const T min = static_cast<T>(1) << (bits - 1);
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 889c6885b2..9f3a1e6991 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -192,7 +192,8 @@ EXPLICIT_CHECK_OP_INSTANTIATION(void const*)
#undef EXPLICIT_CHECK_OP_INSTANTIATION
// comparison_underlying_type provides the underlying integral type of an enum,
-// or std::decay<T>::type if T is not an enum.
+// or std::decay<T>::type if T is not an enum. Booleans are converted to
+// "unsigned int", to allow "unsigned int == bool" comparisons.
template <typename T>
struct comparison_underlying_type {
// std::underlying_type must only be used with enum types, thus use this
@@ -202,8 +203,15 @@ struct comparison_underlying_type {
static constexpr bool is_enum = std::is_enum<decay>::value;
using underlying = typename std::underlying_type<
typename std::conditional<is_enum, decay, Dummy>::type>::type;
- using type = typename std::conditional<is_enum, underlying, decay>::type;
+ using type_or_bool =
+ typename std::conditional<is_enum, underlying, decay>::type;
+ using type =
+ typename std::conditional<std::is_same<type_or_bool, bool>::value,
+ unsigned int, type_or_bool>::type;
};
+// Cast a value to its underlying type
+#define MAKE_UNDERLYING(Type, value) \
+ static_cast<typename comparison_underlying_type<Type>::type>(value)
// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
// signed, and Rhs is unsigned. False in all other cases.
@@ -233,11 +241,14 @@ struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
return IMPL; \
}
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
- lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
+ lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) ==
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
- lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
+ lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) <
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
- lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
+ lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <=
+ MAKE_UNDERLYING(Rhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index 8008812d75..ea9c2fa88d 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -85,7 +85,8 @@ V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
- CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+ nullptr);
}
}
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
new file mode 100644
index 0000000000..5deaa67ce7
--- /dev/null
+++ b/deps/v8/src/base/platform/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+
+hpayer@chromium.org
+mlippautz@chromium.org
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 6df8599def..165651aae1 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -28,7 +28,7 @@ ConditionVariable::ConditionVariable() {
DCHECK_EQ(0, result);
result = pthread_condattr_destroy(&attr);
#else
- int result = pthread_cond_init(&native_handle_, NULL);
+ int result = pthread_cond_init(&native_handle_, nullptr);
#endif
DCHECK_EQ(0, result);
USE(result);
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index f9a9ef4361..3406831cbe 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -56,7 +56,7 @@ class ElapsedTimer final {
DCHECK(IsStarted());
TimeTicks ticks = Now();
TimeDelta elapsed = ticks - start_ticks_;
- DCHECK(elapsed.InMicroseconds() >= 0);
+ DCHECK_GE(elapsed.InMicroseconds(), 0);
start_ticks_ = ticks;
DCHECK(IsStarted());
return elapsed;
@@ -67,7 +67,7 @@ class ElapsedTimer final {
TimeDelta Elapsed() const {
DCHECK(IsStarted());
TimeDelta elapsed = Now() - start_ticks_;
- DCHECK(elapsed.InMicroseconds() >= 0);
+ DCHECK_GE(elapsed.InMicroseconds(), 0);
return elapsed;
}
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index 191f07ffb1..a044075c16 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -25,7 +25,7 @@ static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
result = pthread_mutexattr_destroy(&attr);
#else
// Use a fast mutex (default attributes).
- result = pthread_mutex_init(mutex, NULL);
+ result = pthread_mutex_init(mutex, nullptr);
#endif // defined(DEBUG)
DCHECK_EQ(0, result);
USE(result);
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 6c1bde7b85..39559552bb 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -49,116 +49,24 @@ const char* AIXTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(floor(time / msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on AIX.
}
double AIXTimezoneCache::LocalTimeOffset() {
// On AIX, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- DCHECK(utc != -1);
+ time_t utc = time(nullptr);
+ DCHECK_NE(utc, -1);
struct tm tm;
struct tm* loc = localtime_r(&utc, &tm);
- DCHECK(loc != NULL);
+ DCHECK_NOT_NULL(loc);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
- if (mprotect(address, size, prot) == -1) return false;
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mprotect(address, size, PROT_NONE) != -1;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
@@ -193,7 +101,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
// There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
+ if (start_of_path == nullptr) continue;
buffer[bytes_read] = 0;
result.push_back(SharedLibraryAddress(start_of_path, start, end));
}
@@ -201,7 +109,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index f20c530d67..eabd53570f 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -28,25 +28,37 @@ namespace base {
namespace {
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
- void* hint) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
- base = VirtualAlloc(hint, size, action, protection);
+// The memory allocation implementation is taken from platform-win32.cc.
+
+DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PAGE_NOACCESS;
+ case OS::MemoryPermission::kReadWrite:
+ return PAGE_READWRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ case OS::MemoryPermission::kReadExecute:
+ return PAGE_EXECUTE_READ;
+ }
+ UNREACHABLE();
+}
+
+uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
+ void* hint) {
+ LPVOID base = nullptr;
+
+ // For executable or reserved pages try to use the address hint.
+ if (protect != PAGE_READWRITE) {
+ base = VirtualAlloc(hint, size, flags, protect);
}
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+ // If that fails, let the OS find an address to use.
+ if (base == nullptr) {
+ base = VirtualAlloc(nullptr, size, flags, protect);
+ }
- return base;
+ return reinterpret_cast<uint8_t*>(base);
}
} // namespace
@@ -64,93 +76,97 @@ const char* CygwinTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
}
double CygwinTimezoneCache::LocalTimeOffset() {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- DCHECK(utc != -1);
+ time_t utc = time(nullptr);
+ DCHECK_NE(utc, -1);
struct tm tm;
struct tm* loc = localtime_r(&utc, &tm);
- DCHECK(loc != NULL);
+ DCHECK_NOT_NULL(loc);
// time - localtime includes any daylight savings offset, so subtract it.
return static_cast<double>((mktime(loc) - utc) * msPerSecond -
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- hint = AlignedAddress(hint, alignment);
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == NULL) {
- *allocated = 0;
- return nullptr;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ address = AlignedAddress(address, alignment);
+
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE
+ : MEM_RESERVE | MEM_COMMIT;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+
+ // First, try an exact size aligned allocation.
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // If address is suitably aligned, we're done.
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+ // Otherwise, free it and try a larger allocation.
+ CHECK(Free(base, size));
+
+ // Clear the hint. It's unlikely we can allocate at this address.
+ address = nullptr;
+
+ // Add the maximum misalignment so we are guaranteed an aligned base address
+ // in the allocated region.
+ size_t padded_size = size + (alignment - page_size);
+ const int kMaxAttempts = 3;
+ aligned_base = nullptr;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // Try to trim the allocation by freeing the padded allocation and then
+ // calling VirtualAlloc at the aligned base.
+ CHECK(Free(base, padded_size));
+ aligned_base = RoundUp(base, alignment);
+ base = reinterpret_cast<uint8_t*>(
+ VirtualAlloc(aligned_base, size, flags, protect));
+ // We might not get the reduced allocation due to a race. In that case,
+ // base will be nullptr.
+ if (base != nullptr) break;
}
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != nullptr) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
- }
- }
-
- *allocated = request_size;
- return static_cast<void*>(address);
+ DCHECK_EQ(base, aligned_base);
+ return reinterpret_cast<void*>(base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, static_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ USE(size);
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ if (access == MemoryPermission::kNoAccess) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ }
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -165,7 +181,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -193,7 +209,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp); // Push the '/' back into the stream to be read below.
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -219,7 +235,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Nothing to do on Cygwin.
}
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index a1eb7e8928..2b9779b843 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -40,106 +40,8 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- hint = AlignedAddress(hint, alignment);
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
@@ -174,7 +76,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
// There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
+ if (start_of_path == nullptr) continue;
buffer[bytes_read] = 0;
result.push_back(SharedLibraryAddress(start_of_path, start, end));
}
@@ -182,7 +84,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 16e6f1d2b0..83a8a23c48 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -13,79 +13,73 @@
namespace v8 {
namespace base {
-TimezoneCache* OS::CreateTimezoneCache() {
- return new PosixDefaultTimezoneCache();
+namespace {
+
+uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return 0; // no permissions
+ case OS::MemoryPermission::kReadWrite:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+ ZX_VM_FLAG_PERM_EXECUTE;
+ case OS::MemoryPermission::kReadExecute:
+ return ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE;
+ }
+ UNREACHABLE();
}
-// static
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
- return nullptr;
-}
+} // namespace
-// static
-void OS::Guard(void* address, size_t size) {
- CHECK_EQ(ZX_OK, zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- 0 /*no permissions*/));
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- zx_handle_t vmo;
- if (zx_vmo_create(size, 0, &vmo) != ZX_OK) return nullptr;
- uintptr_t result;
- zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size,
- 0 /*no permissions*/, &result);
- zx_handle_close(vmo);
- if (status != ZX_OK) return nullptr;
- return reinterpret_cast<void*>(result);
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
}
// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ OS::MemoryPermission access) {
+ size_t page_size = OS::AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ address = AlignedAddress(address, alignment);
+ // Add the maximum misalignment so we are guaranteed an aligned base address.
+ size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
- *allocated = 0;
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
strlen(kVirtualMemoryName));
uintptr_t reservation;
+ uint32_t prot = GetProtectionFromMemoryPermission(access);
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, request_size,
- 0 /*no permissions*/, &reservation);
+ prot, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
if (status != ZX_OK) {
- *allocated = 0;
return nullptr;
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
+ DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);
zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
prefix_size);
request_size -= prefix_size;
}
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
+ size_t aligned_size = RoundUp(size, page_size);
if (aligned_size != request_size) {
+ DCHECK_LT(aligned_size, request_size);
size_t suffix_size = request_size - aligned_size;
zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
@@ -94,37 +88,33 @@ void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
}
DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
return static_cast<void*>(aligned_base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- uint32_t prot = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
- (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
- return zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- prot) == ZX_OK;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- 0 /*no permissions*/) == ZX_OK;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ return zx_vmar_unmap(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return zx_vmar_unmap(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return zx_vmar_unmap(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ uint32_t prot = GetProtectionFromMemoryPermission(access);
+ return zx_vmar_protect(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ prot) == ZX_OK;
}
// static
@@ -138,7 +128,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
CHECK(false); // TODO(scottmg): Port, https://crbug.com/731217.
}
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 2299a2c3e3..725ad0c6eb 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -93,109 +93,13 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) return nullptr;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -203,11 +107,15 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
// This loop will terminate once the scanning hits an EOF.
while (true) {
- uintptr_t start, end;
+ uintptr_t start, end, offset;
char attr_r, attr_w, attr_x, attr_p;
// Parse the addresses and permission bits at the beginning of the line.
if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+ if (fscanf(fp, "%" V8PRIxPTR, &offset) != 1) break;
+
+ // Adjust {start} based on {offset}.
+ start -= offset;
int c;
if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
@@ -224,7 +132,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp);
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -250,7 +158,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -261,14 +169,14 @@ void OS::SignalCodeMovingGC(void* hint) {
// kernel log.
long size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
+ if (f == nullptr) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr =
- mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
- OS::Free(addr, size);
+ CHECK(Free(addr, size));
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 3c19962186..081e434589 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -43,119 +43,12 @@
namespace v8 {
namespace base {
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the user
-// defined tag 255 This helps identify V8-allocated regions in memory analysis
-// tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-// static
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return nullptr;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return true; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
+ if (header == nullptr) continue;
#if V8_HOST_ARCH_X64
uint64_t size;
char* code_ptr = getsectdatafromheader_64(
@@ -165,7 +58,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
unsigned int size;
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
- if (code_ptr == NULL) continue;
+ if (code_ptr == nullptr) continue;
const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
@@ -174,7 +67,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 910d4a8104..9084c3075e 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -38,112 +38,13 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
+ if (fp == nullptr) return result;
// Allocate enough room to be able to store a full file name.
const int kLibNameLen = FILENAME_MAX + 1;
@@ -171,7 +72,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
ungetc(c, fp); // Push the '/' back into the stream to be read below.
// Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+ if (fgets(lib_name, kLibNameLen, fp) == nullptr) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
@@ -197,7 +98,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {
+void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -208,13 +109,13 @@ void OS::SignalCodeMovingGC(void* hint) {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
+ if (f == nullptr) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
void* addr =
- mmap(hint, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
- DCHECK_NE(MAP_FAILED, addr);
+ mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ DCHECK(addr != MAP_FAILED);
OS::Free(addr, size);
fclose(f);
}
diff --git a/deps/v8/src/base/platform/platform-posix-time.cc b/deps/v8/src/base/platform/platform-posix-time.cc
index a960f7237e..54618810c2 100644
--- a/deps/v8/src/base/platform/platform-posix-time.cc
+++ b/deps/v8/src/base/platform/platform-posix-time.cc
@@ -19,7 +19,7 @@ const char* PosixDefaultTimezoneCache::LocalTimezone(double time) {
}
double PosixDefaultTimezoneCache::LocalTimeOffset() {
- time_t tv = time(NULL);
+ time_t tv = time(nullptr);
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 8f658b95cb..b873197d3b 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -27,8 +27,6 @@
#include <sys/sysctl.h> // NOLINT, for sysctl
#endif
-#undef MAP_TYPE
-
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
#include <android/log.h> // NOLINT
@@ -61,6 +59,22 @@
#include <sys/syscall.h>
#endif
+#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#if defined(V8_OS_SOLARIS)
+#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
+extern "C" int madvise(caddr_t, size_t, int);
+#else
+extern int madvise(caddr_t, size_t, int);
+#endif
+#endif
+
+#ifndef MADV_FREE
+#define MADV_FREE MADV_DONTNEED
+#endif
+
namespace v8 {
namespace base {
@@ -71,10 +85,96 @@ const pthread_t kNoThread = (pthread_t) 0;
bool g_hard_abort = false;
-const char* g_gc_fake_mmap = NULL;
+const char* g_gc_fake_mmap = nullptr;
+
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+#if !V8_OS_FUCHSIA
+#if V8_OS_MACOSX
+// kMmapFd is used to pass vm_alloc flags to tag the region with the user
+// defined tag 255 This helps identify V8-allocated regions in memory analysis
+// tools like vmmap(1).
+const int kMmapFd = VM_MAKE_TAG(255);
+#else // !V8_OS_MACOSX
+const int kMmapFd = -1;
+#endif // !V8_OS_MACOSX
+
+const int kMmapFdOffset = 0;
+
+int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PROT_NONE;
+ case OS::MemoryPermission::kReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case OS::MemoryPermission::kReadExecute:
+ return PROT_READ | PROT_EXEC;
+ }
+ UNREACHABLE();
+}
+
+int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (access == OS::MemoryPermission::kNoAccess) {
+#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
+ flags |= MAP_NORESERVE;
+#endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
+#if V8_OS_QNX
+ flags |= MAP_LAZY;
+#endif // V8_OS_QNX
+ }
+ return flags;
+}
+
+void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
+ const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
+ int prot = GetProtectionFromMemoryPermission(access);
+ int flags = GetFlagsForMemoryPermission(access);
+ void* result =
+ mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
+ if (result == MAP_FAILED) return nullptr;
+ return result;
+}
+
+int ReclaimInaccessibleMemory(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
+ // marks the pages with the reusable bit, which allows both Activity Monitor
+ // and memory-infra to correctly track the pages.
+ int ret = madvise(address, size, MADV_FREE_REUSABLE);
+#elif defined(_AIX) || defined(V8_OS_SOLARIS)
+ int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
+#else
+ int ret = madvise(address, size, MADV_FREE);
+#endif
+ if (ret != 0 && errno == EINVAL) {
+ // MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
+ // MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
+ // imply runtime support.
+#if defined(_AIX) || defined(V8_OS_SOLARIS)
+ ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
+#else
+ ret = madvise(address, size, MADV_DONTNEED);
+#endif
+ }
+ return ret;
+}
+
+#endif // !V8_OS_FUCHSIA
} // namespace
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
+ }
+ g_hard_abort = hard_abort;
+ g_gc_fake_mmap = gc_fake_mmap;
+}
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
@@ -95,77 +195,161 @@ int OS::ActivationFrameAlignment() {
#endif
}
+// static
+size_t OS::AllocatePageSize() {
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+}
-intptr_t OS::CommitPageSize() {
- static intptr_t page_size = getpagesize();
+// static
+size_t OS::CommitPageSize() {
+ static size_t page_size = getpagesize();
return page_size;
}
-void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint) {
- return OS::Allocate(requested, allocated,
- is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite,
- hint);
+// static
+void* OS::GetRandomMmapAddr() {
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Dynamic tools do not support custom mmap addresses.
+ return nullptr;
+#endif
+ uintptr_t raw_addr;
+ platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+ sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#elif V8_TARGET_ARCH_PPC64
+#if V8_OS_AIX
+ // AIX: 64 bits of virtual addressing, but we limit address range to:
+ // a) minimize Segment Lookaside Buffer (SLB) misses and
+ raw_addr &= V8_UINT64_C(0x3ffff000);
+ // Use extra address space to isolate the mmap regions.
+ raw_addr += V8_UINT64_C(0x400000000000);
+#elif V8_TARGET_BIG_ENDIAN
+ // Big-endian Linux: 44 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x03fffffff000);
+#else
+ // Little-endian Linux: 48 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#endif
+#elif V8_TARGET_ARCH_S390X
+ // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+ // of virtual addressing. Truncate to 40 bits to allow kernel chance to
+ // fulfill request.
+ raw_addr &= V8_UINT64_C(0xfffffff000);
+#elif V8_TARGET_ARCH_S390
+ // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
+ // to fulfill request.
+ raw_addr &= 0x1ffff000;
+#else
+ raw_addr &= 0x3ffff000;
+
+#ifdef __sun
+ // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+ // half of the top half of the address space (that is, the third quarter).
+ // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+ // system will not fail to mmap() because something else happens to already
+ // be mapped at our random address. We deliberately set the hint high enough
+ // to get well above the system's break (that is, the heap); Solaris and
+ // illumos will try the hint and if that fails allocate as if there were
+ // no hint at all. The high hint prevents the break from getting hemmed in
+ // at low values, ceding half of the address space to the system heap.
+ raw_addr += 0x80000000;
+#elif V8_OS_AIX
+ // The range 0x30000000 - 0xD0000000 is available on AIX;
+ // choose the upper range.
+ raw_addr += 0x90000000;
+#else
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+ // 10.6 and 10.7.
+ raw_addr += 0x20000000;
+#endif
+#endif
+ return reinterpret_cast<void*>(raw_addr);
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- DCHECK(result == 0);
-}
+// TODO(bbudge) Move Cygwin and Fuschia stuff into platform-specific files.
+#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
+// static
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ address = AlignedAddress(address, alignment);
+ // Add the maximum misalignment so we are guaranteed an aligned base address.
+ size_t request_size = size + (alignment - page_size);
+ void* result = base::Allocate(address, request_size, access);
+ if (result == nullptr) return nullptr;
+
+ // Unmap memory allocated before the aligned base address.
+ uint8_t* base = static_cast<uint8_t*>(result);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (aligned_base != base) {
+ DCHECK_LT(base, aligned_base);
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ CHECK(Free(base, prefix_size));
+ request_size -= prefix_size;
+ }
+ // Unmap memory allocated after the potentially unaligned end.
+ if (size != request_size) {
+ DCHECK_LT(size, request_size);
+ size_t suffix_size = request_size - size;
+ CHECK(Free(aligned_base + size, suffix_size));
+ request_size -= suffix_size;
+ }
+ DCHECK_EQ(size, request_size);
+ return static_cast<void*>(aligned_base);
+}
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#else
- mprotect(address, size, PROT_READ | PROT_EXEC);
-#endif
+// static
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ DCHECK_EQ(0, size % AllocatePageSize());
+ return munmap(address, size) == 0;
}
+// static
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ return munmap(address, size) == 0;
+}
-// Create guard pages.
-#if !V8_OS_FUCHSIA
-void OS::Guard(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-#else
- mprotect(address, size, PROT_NONE);
-#endif
+// static
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+
+ int prot = GetProtectionFromMemoryPermission(access);
+ int ret = mprotect(address, size, prot);
+ if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
+ ret = ReclaimInaccessibleMemory(address, size);
+ }
+ return ret == 0;
}
-#endif // !V8_OS_FUCHSIA
-// Make a region of memory readable and writable.
-void OS::Unprotect(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
+// static
+bool OS::HasLazyCommits() {
+#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
+ return true;
#else
- mprotect(address, size, PROT_READ | PROT_WRITE);
+ // TODO(bbudge) Return true for all POSIX platforms.
+ return false;
#endif
}
-
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
- g_hard_abort = hard_abort;
- g_gc_fake_mmap = gc_fake_mmap;
-}
-
+#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
const char* OS::GetGCFakeMMapFile() {
return g_gc_fake_mmap;
}
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(sysconf(_SC_PAGESIZE));
-}
-
-
void OS::Sleep(TimeDelta interval) {
usleep(static_cast<useconds_t>(interval.InMicroseconds()));
}
@@ -220,13 +404,14 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
if (FILE* file = fopen(name, "r+")) {
if (fseek(file, 0, SEEK_END) == 0) {
long size = ftell(file); // NOLINT(runtime/int)
if (size >= 0) {
- void* const memory = mmap(hint, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fileno(file), 0);
+ void* const memory =
+ mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -239,13 +424,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
- void* memory = mmap(hint, result, PROT_READ | PROT_WRITE, MAP_SHARED,
- fileno(file), 0);
+ void* memory = mmap(OS::GetRandomMmapAddr(), result,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, result);
}
@@ -257,7 +442,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
+ if (memory_) CHECK(OS::Free(memory_, size_));
fclose(file_);
}
@@ -309,7 +494,7 @@ double PosixTimezoneCache::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return std::numeric_limits<double>::quiet_NaN();
+ if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
@@ -325,16 +510,16 @@ int OS::GetLastError() {
FILE* OS::FOpen(const char* path, const char* mode) {
FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
+ if (file == nullptr) return nullptr;
struct stat file_stat;
if (fstat(fileno(file), &file_stat) != 0) {
fclose(file);
- return NULL;
+ return nullptr;
}
bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
if (is_regular_file) return file;
fclose(file);
- return NULL;
+ return nullptr;
}
@@ -462,7 +647,7 @@ class Thread::PlatformData {
Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
+ start_semaphore_(nullptr) {
if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
stack_size_ = PTHREAD_STACK_MIN;
}
@@ -487,8 +672,7 @@ static void SetThreadName(const char* name) {
int (*dynamic_pthread_setname_np)(const char*);
*reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (dynamic_pthread_setname_np == NULL)
- return;
+ if (dynamic_pthread_setname_np == nullptr) return;
// Mac OS X does not expose the length limit of the name, so hardcode it.
static const int kMaxNameLength = 63;
@@ -509,9 +693,9 @@ static void* ThreadEntry(void* arg) {
// one).
{ LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
- DCHECK(thread->data()->thread_ != kNoThread);
+ DCHECK_NE(thread->data()->thread_, kNoThread);
thread->NotifyStartedAndRun();
- return NULL;
+ return nullptr;
}
@@ -548,15 +732,11 @@ void Thread::Start() {
DCHECK_EQ(0, result);
result = pthread_attr_destroy(&attr);
DCHECK_EQ(0, result);
- DCHECK(data_->thread_ != kNoThread);
+ DCHECK_NE(data_->thread_, kNoThread);
USE(result);
}
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
+void Thread::Join() { pthread_join(data_->thread_, nullptr); }
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
@@ -595,7 +775,7 @@ static void InitializeTlsBaseOffset() {
char buffer[kBufferSize];
size_t buffer_size = kBufferSize;
int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
@@ -605,7 +785,7 @@ static void InitializeTlsBaseOffset() {
char* period_pos = strchr(buffer, '.');
*period_pos = '\0';
int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
// The constants below are taken from pthreads.s from the XNU kernel
// sources archive at www.opensource.apple.com.
if (kernel_version_major < 11) {
@@ -633,7 +813,7 @@ static void CheckFastTls(Thread::LocalStorageKey key) {
V8_Fatal(__FILE__, __LINE__,
"V8 failed to initialize fast TLS on current kernel");
}
- Thread::SetThreadLocal(key, NULL);
+ Thread::SetThreadLocal(key, nullptr);
}
#endif // V8_FAST_TLS_SUPPORTED
@@ -648,7 +828,7 @@ Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
}
#endif
pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
+ int result = pthread_key_create(&key, nullptr);
DCHECK_EQ(0, result);
USE(result);
LocalStorageKey local_key = PthreadKeyToLocalKey(key);
@@ -681,17 +861,9 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
- switch (access) {
- case OS::MemoryPermission::kNoAccess:
- return PROT_NONE;
- case OS::MemoryPermission::kReadWrite:
- return PROT_READ | PROT_WRITE;
- case OS::MemoryPermission::kReadWriteExecute:
- return PROT_READ | PROT_WRITE | PROT_EXEC;
- }
- UNREACHABLE();
-}
+#undef LOG_TAG
+#undef MAP_ANONYMOUS
+#undef MADV_FREE
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index b092bb526d..55861bc9ac 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -21,8 +21,6 @@ class PosixTimezoneCache : public TimezoneCache {
static const int msPerSecond = 1000;
};
-int GetProtectionFromMemoryPermission(OS::MemoryPermission access);
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 68bc0efbf9..640b77c816 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -89,106 +89,9 @@ TimezoneCache* OS::CreateTimezoneCache() {
return new PosixDefaultTimezoneCache();
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() { return false; }
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
- procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ procfs_mapinfo *mapinfos = nullptr, *mapinfo;
int proc_fd, num, i;
struct {
@@ -205,14 +108,14 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
}
/* Get the number of map entries. */
- if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, nullptr, 0, &num) != EOK) {
close(proc_fd);
return result;
}
mapinfos =
reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
- if (mapinfos == NULL) {
+ if (mapinfos == nullptr) {
close(proc_fd);
return result;
}
@@ -241,7 +144,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 2ea6ef4a6c..b81895a3fb 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -47,7 +47,7 @@ const char* SolarisTimezoneCache::LocalTimezone(double time) {
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
- if (NULL == t) return "";
+ if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
@@ -58,111 +58,11 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = GetProtectionFromMemoryPermission(access);
- void* mbase =
- mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- void* result =
- mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-// static
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* result = ReserveRegion(request_size, hint);
- if (result == nullptr) {
- *allocated = 0;
- return nullptr;
- }
-
- uint8_t* base = static_cast<uint8_t*>(result);
- uint8_t* aligned_base = RoundUp(base, alignment);
- DCHECK_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- DCHECK_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- DCHECK(aligned_size == request_size);
-
- *allocated = aligned_size;
- return static_cast<void*>(aligned_base);
-}
-
-// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-// static
-bool OS::UncommitRegion(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-// static
-bool OS::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}
-void OS::SignalCodeMovingGC(void* hint) {}
+void OS::SignalCodeMovingGC() {}
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index de1a27506f..e026d7edae 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -20,10 +20,12 @@
#include "src/base/win32-headers.h"
#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/timezone-cache.h"
+#include "src/base/utils/random-number-generator.h"
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
@@ -45,14 +47,14 @@ inline void MemoryFence() {
int localtime_s(tm* out_tm, const time_t* time) {
tm* posix_local_time_struct = localtime_r(time, out_tm);
- if (posix_local_time_struct == NULL) return 1;
+ if (posix_local_time_struct == nullptr) return 1;
return 0;
}
int fopen_s(FILE** pFile, const char* filename, const char* mode) {
*pFile = fopen(filename, mode);
- return *pFile != NULL ? 0 : 1;
+ return *pFile != nullptr ? 0 : 1;
}
int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
@@ -63,8 +65,8 @@ int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
- CHECK(source != NULL);
- CHECK(dest != NULL);
+ CHECK(source != nullptr);
+ CHECK(dest != nullptr);
CHECK_GT(dest_size, 0);
if (count == _TRUNCATE) {
@@ -137,11 +139,11 @@ class WindowsTimezoneCache : public TimezoneCache {
}
// Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1, std_tz_name_,
+ kTzNameSize, nullptr, nullptr);
std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1, dst_tz_name_,
+ kTzNameSize, nullptr, nullptr);
dst_tz_name_[kTzNameSize - 1] = '\0';
// If OS returned empty string or resource id (like "@tzres.dll,-211")
@@ -551,7 +553,7 @@ FILE* OS::FOpen(const char* path, const char* mode) {
if (fopen_s(&result, path, mode) == 0) {
return result;
} else {
- return NULL;
+ return nullptr;
}
}
@@ -572,13 +574,13 @@ FILE* OS::OpenTemporaryFile() {
char tempPathBuffer[MAX_PATH];
DWORD path_result = 0;
path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
- if (path_result > MAX_PATH || path_result == 0) return NULL;
+ if (path_result > MAX_PATH || path_result == 0) return nullptr;
UINT name_result = 0;
char tempNameBuffer[MAX_PATH];
name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
- if (name_result == 0) return NULL;
+ if (name_result == 0) return nullptr;
FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
- if (result != NULL) {
+ if (result != nullptr) {
Remove(tempNameBuffer); // Delete on close.
}
return result;
@@ -672,42 +674,81 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocatePageSize() {
+ static size_t allocate_alignment = 0;
+ if (allocate_alignment == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ allocate_alignment = info.dwAllocationGranularity;
+ }
+ return allocate_alignment;
+}
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
+size_t OS::CommitPageSize() {
static size_t page_size = 0;
if (page_size == 0) {
SYSTEM_INFO info;
GetSystemInfo(&info);
- page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
+ page_size = info.dwPageSize;
+ DCHECK_EQ(4096, page_size);
}
return page_size;
}
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
- static size_t allocate_alignment = 0;
- if (allocate_alignment == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- allocate_alignment = info.dwAllocationGranularity;
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
}
- return allocate_alignment;
+ g_hard_abort = hard_abort;
}
-void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
- g_hard_abort = hard_abort;
+void* OS::GetRandomMmapAddr() {
+// The address range used to randomize RWX allocations in OS::Allocate
+// Try not to map pages into the default range that windows loads DLLs
+// Use a multiple of 64k to prevent committing unused memory.
+// Note: This does not guarantee RWX regions will be within the
+// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address;
+ platform_random_number_generator.Pointer()->NextBytes(&address,
+ sizeof(address));
+ address <<= kPageSizeBits;
+ address += kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void*>(address);
}
namespace {
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
- void* hint) {
- LPVOID base = NULL;
+DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PAGE_NOACCESS;
+ case OS::MemoryPermission::kReadWrite:
+ return PAGE_READWRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ case OS::MemoryPermission::kReadExecute:
+ return PAGE_EXECUTE_READ;
+ }
+ UNREACHABLE();
+}
+
+uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
+ void* hint) {
+ LPVOID base = nullptr;
static BOOL use_aslr = -1;
#ifdef V8_HOST_ARCH_32_BIT
// Don't bother randomizing on 32-bit hosts, because they lack the room and
@@ -718,146 +759,96 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
use_aslr = TRUE;
#endif
- if (use_aslr &&
- (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
- // For executable pages try and randomize the allocation address
- base = VirtualAlloc(hint, size, action, protection);
+ if (use_aslr && protect != PAGE_READWRITE) {
+ // For executable or reserved pages try to randomize the allocation address.
+ base = VirtualAlloc(hint, size, flags, protect);
}
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-} // namespace
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint) {
- return OS::Allocate(requested, allocated,
- is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite,
- hint);
-}
-
-void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access, void* hint) {
- // VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
- // Windows XP SP2 allows Data Excution Prevention (DEP).
- int prot = PAGE_NOACCESS;
- switch (access) {
- case OS::MemoryPermission::kNoAccess: {
- prot = PAGE_NOACCESS;
- break;
- }
- case OS::MemoryPermission::kReadWrite: {
- prot = PAGE_READWRITE;
- break;
- }
- case OS::MemoryPermission::kReadWriteExecute: {
- prot = PAGE_EXECUTE_READWRITE;
- break;
- }
+ // On failure, let the OS find an address to use.
+ if (base == nullptr) {
+ base = VirtualAlloc(nullptr, size, flags, protect);
}
-
- LPVOID mbase =
- RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
-
- if (mbase == NULL) return NULL;
-
- DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
-
- *allocated = msize;
- return mbase;
+ return reinterpret_cast<uint8_t*>(base);
}
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): VirtualFree has a return value which is ignored here.
- VirtualFree(address, 0, MEM_RELEASE);
- USE(size);
-}
-
-intptr_t OS::CommitPageSize() {
- return 4096;
-}
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-void OS::Unprotect(void* address, const size_t size) {
- LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
- USE(result);
-}
+} // namespace
// static
-void* OS::ReserveRegion(size_t size, void* hint) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
-}
-
-void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated) {
- DCHECK((alignment % OS::AllocateAlignment()) == 0);
- hint = AlignedAddress(hint, alignment);
- size_t request_size =
- RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ address = AlignedAddress(address, alignment);
+
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE
+ : MEM_RESERVE | MEM_COMMIT;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+
+ // First, try an exact size aligned allocation.
+ uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // If address is suitably aligned, we're done.
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ if (base == aligned_base) return reinterpret_cast<void*>(base);
+
+ // Otherwise, free it and try a larger allocation.
+ CHECK(Free(base, size));
+
+ // Clear the hint. It's unlikely we can allocate at this address.
+ address = nullptr;
+
+ // Add the maximum misalignment so we are guaranteed an aligned base address
+ // in the allocated region.
+ size_t padded_size = size + (alignment - page_size);
+ const int kMaxAttempts = 3;
+ aligned_base = nullptr;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ base = RandomizedVirtualAlloc(padded_size, flags, protect, address);
+ if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
+
+ // Try to trim the allocation by freeing the padded allocation and then
+ // calling VirtualAlloc at the aligned base.
+ CHECK(Free(base, padded_size));
+ aligned_base = RoundUp(base, alignment);
+ base = reinterpret_cast<uint8_t*>(
+ VirtualAlloc(aligned_base, size, flags, protect));
+ // We might not get the reduced allocation due to a race. In that case,
+ // base will be nullptr.
+ if (base != nullptr) break;
}
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- DCHECK(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != nullptr) {
- request_size = size;
- DCHECK(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size, hint);
- if (address == nullptr) {
- *allocated = 0;
- return nullptr;
- }
- }
-
- *allocated = request_size;
- return static_cast<void*>(address);
+ DCHECK_EQ(base, aligned_base);
+ return reinterpret_cast<void*>(base);
}
// static
-bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
+bool OS::Free(void* address, const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
+ // TODO(bbudge) Add DCHECK_EQ(0, size % AllocatePageSize()) when callers
+ // pass the correct size on Windows.
+ USE(size);
+ return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
// static
-bool OS::UncommitRegion(void* address, size_t size) {
+bool OS::Release(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
// static
-bool OS::ReleaseRegion(void* address, size_t size) {
- return VirtualFree(address, 0, MEM_RELEASE) != 0;
-}
-
-// static
-bool OS::ReleasePartialRegion(void* address, size_t size) {
- return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ if (access == MemoryPermission::kNoAccess) {
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+ }
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -916,18 +907,19 @@ class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- if (file == INVALID_HANDLE_VALUE) return NULL;
+ FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+ OPEN_EXISTING, 0, nullptr);
+ if (file == INVALID_HANDLE_VALUE) return nullptr;
- DWORD size = GetFileSize(file, NULL);
+ DWORD size = GetFileSize(file, nullptr);
- // Create a file mapping for the physical file. Ignore hint on Windows.
+ // Create a file mapping for the physical file
HANDLE file_mapping =
- CreateFileMapping(file, NULL, PAGE_READWRITE, 0, size, NULL);
- if (file_mapping == NULL) return NULL;
+ CreateFileMapping(file, nullptr, PAGE_READWRITE, 0, size, nullptr);
+ if (file_mapping == nullptr) return nullptr;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
@@ -936,17 +928,17 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, void* hint) {
// static
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, void* hint,
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
- OPEN_ALWAYS, 0, NULL);
- if (file == NULL) return NULL;
- // Create a file mapping for the physical file. Ignore hint on Windows.
- HANDLE file_mapping = CreateFileMapping(file, NULL, PAGE_READWRITE, 0,
- static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
+ FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+ OPEN_ALWAYS, 0, nullptr);
+ if (file == nullptr) return nullptr;
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, nullptr, PAGE_READWRITE, 0,
+ static_cast<DWORD>(size), nullptr);
+ if (file_mapping == nullptr) return nullptr;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
if (memory) memmove(memory, initial, size);
@@ -1062,7 +1054,7 @@ typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
#undef VOID
// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = nullptr;
DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
#undef DEF_DLL_FUNCTION
@@ -1079,7 +1071,7 @@ static bool LoadDbgHelpAndTlHelp32() {
// Load functions from the dbghelp.dll module.
module = LoadLibrary(TEXT("dbghelp.dll"));
- if (module == NULL) {
+ if (module == nullptr) {
return false;
}
@@ -1094,7 +1086,7 @@ DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
// Load functions from the kernel32.dll module (the TlHelp32.h function used
// to be in tlhelp32.dll but are now moved to kernel32.dll).
module = LoadLibrary(TEXT("kernel32.dll"));
- if (module == NULL) {
+ if (module == nullptr) {
return false;
}
@@ -1107,14 +1099,14 @@ TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
#undef LOAD_DLL_FUNC
// Check that all functions where loaded.
- bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != nullptr)&&
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+ DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
#undef DLL_FUNC_LOADED
- true;
+ true;
dbghelp_loaded = result;
return result;
@@ -1141,7 +1133,7 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
// Initialize the symbol engine.
ok = _SymInitialize(process_handle, // hProcess
- NULL, // UserSearchPath
+ nullptr, // UserSearchPath
false); // fInvadeProcess
if (!ok) return result;
@@ -1185,10 +1177,10 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
}
}
int lib_name_length = WideCharToMultiByte(
- CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+ CP_UTF8, 0, module_entry.szExePath, -1, nullptr, 0, nullptr, nullptr);
std::string lib_name(lib_name_length, 0);
WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
- lib_name_length, NULL, NULL);
+ lib_name_length, nullptr, nullptr);
result.push_back(OS::SharedLibraryAddress(
lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
@@ -1212,13 +1204,16 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return LoadSymbols(process_handle);
}
+void OS::SignalCodeMovingGC() {}
+
#else // __MINGW32__
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<OS::SharedLibraryAddress>();
}
+
+void OS::SignalCodeMovingGC() {}
#endif // __MINGW32__
-void OS::SignalCodeMovingGC(void* hint) {}
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
@@ -1261,8 +1256,7 @@ class Thread::PlatformData {
// handle until it is started.
Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
+ : stack_size_(options.stack_size()), start_semaphore_(nullptr) {
data_ = new PlatformData(kNoThread);
set_name(options.name());
}
@@ -1286,12 +1280,8 @@ Thread::~Thread() {
// initialize thread specific structures in the C runtime library.
void Thread::Start() {
data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL,
- static_cast<unsigned>(stack_size_),
- ThreadEntry,
- this,
- 0,
- &data_->thread_id_));
+ _beginthreadex(nullptr, static_cast<unsigned>(stack_size_), ThreadEntry,
+ this, 0, &data_->thread_id_));
}
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 0ff8599b0c..dd454ecd43 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -62,7 +62,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
kPointerSize * index));
}
intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- DCHECK(extra != 0);
+ DCHECK_NE(extra, 0);
return *reinterpret_cast<intptr_t*>(extra +
kPointerSize * (index - kMaxInlineSlots));
}
@@ -107,9 +107,11 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
+ // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
// - hard_abort: If true, OS::Abort() will crash instead of aborting.
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
+ static void Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -155,55 +157,47 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
- // Memory access permissions. Only the modes currently used by V8 are listed
- // here even though most systems support additional modes.
- enum class MemoryPermission { kNoAccess, kReadWrite, kReadWriteExecute };
-
- // Allocate/Free memory used by JS heap. Permissions are set according to the
- // is_* flags. Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested, size_t* allocated,
- MemoryPermission access, void* hint = nullptr);
- // Allocate/Free memory used by JS heap. Pages are readable/writable, but
- // they are not guaranteed to be executable unless 'executable' is true.
- // Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested, size_t* allocated,
- bool is_executable, void* hint = nullptr);
- static void Free(void* address, const size_t size);
-
- // Allocates a region of memory that is inaccessible. On Windows this reserves
- // but does not commit the memory. On POSIX systems it allocates memory as
- // PROT_NONE, which also prevents it from being committed.
- static void* AllocateGuarded(const size_t requested);
-
- // This is the granularity at which the ProtectCode(...) call can set page
- // permissions.
- static intptr_t CommitPageSize();
-
- // Mark code segments non-writable.
- static void ProtectCode(void* address, const size_t size);
-
- // Assign memory as a guard page so that access will cause an exception.
- static void Guard(void* address, const size_t size);
-
- // Make a region of memory readable and writable.
- static void Unprotect(void* address, const size_t size);
+ enum class MemoryPermission {
+ kNoAccess,
+ kReadWrite,
+ // TODO(hpayer): Remove this flag. Memory should never be rwx.
+ kReadWriteExecute,
+ kReadExecute
+ };
- // Get the Alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
+ // Gets the page granularity for Allocate. Addresses returned by Allocate are
+ // aligned to this size.
+ static size_t AllocatePageSize();
- static void* ReserveRegion(size_t size, void* hint);
+ // Gets the granularity at which the permissions and commit calls can be made.
+ static size_t CommitPageSize();
- static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
- size_t* allocated);
+ // Generate a random address to be used for hinting allocation calls.
+ static void* GetRandomMmapAddr();
- static bool CommitRegion(void* address, size_t size, bool is_executable);
+ // Allocates memory. Permissions are set according to the access argument.
+ // The address parameter is a hint. The size and alignment parameters must be
+ // multiples of AllocatePageSize(). Returns the address of the allocated
+ // memory, with the specified size and alignment, or nullptr on failure.
+ V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
+ size_t alignment,
+ MemoryPermission access);
- static bool UncommitRegion(void* address, size_t size);
+ // Frees memory allocated by a call to Allocate. address and size must be
+ // multiples of AllocatePageSize(). Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
- static bool ReleaseRegion(void* address, size_t size);
+ // Releases memory that is no longer needed. The range specified by address
+ // and size must be part of an allocated memory region, and must be multiples
+ // of CommitPageSize(). Released memory is left in an undefined state, so it
+ // should not be accessed. Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
- // Release part of a reserved address range.
- static bool ReleasePartialRegion(void* address, size_t size);
+ // Sets permissions according to the access argument. address and size must be
+ // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
+ // the memory contents to be lost. Returns true on success, otherwise false.
+ V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
+ MemoryPermission access);
static bool HasLazyCommits();
@@ -231,8 +225,8 @@ class V8_BASE_EXPORT OS {
virtual void* memory() const = 0;
virtual size_t size() const = 0;
- static MemoryMappedFile* open(const char* name, void* hint);
- static MemoryMappedFile* create(const char* name, void* hint, size_t size,
+ static MemoryMappedFile* open(const char* name);
+ static MemoryMappedFile* create(const char* name, size_t size,
void* initial);
};
@@ -271,7 +265,7 @@ class V8_BASE_EXPORT OS {
// process that a code moving garbage collection starts. Can do
// nothing, in which case the code objects must not move (e.g., by
// using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC(void* hint);
+ static void SignalCodeMovingGC();
// Support runtime detection of whether the hard float option of the
// EABI is used.
@@ -335,7 +329,7 @@ class V8_BASE_EXPORT Thread {
Start();
start_semaphore_->Wait();
delete start_semaphore_;
- start_semaphore_ = NULL;
+ start_semaphore_ = nullptr;
}
// Wait until thread terminates.
@@ -360,7 +354,7 @@ class V8_BASE_EXPORT Thread {
SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
}
static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
+ return GetThreadLocal(key) != nullptr;
}
#ifdef V8_FAST_TLS_SUPPORTED
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 346705fd02..9a7ef7a8f4 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -73,7 +73,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#elif V8_OS_POSIX
Semaphore::Semaphore(int count) {
- DCHECK(count >= 0);
+ DCHECK_GE(count, 0);
int result = sem_init(&native_handle_, 0, count);
DCHECK_EQ(0, result);
USE(result);
@@ -135,9 +135,9 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#elif V8_OS_WIN
Semaphore::Semaphore(int count) {
- DCHECK(count >= 0);
- native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- DCHECK(native_handle_ != NULL);
+ DCHECK_GE(count, 0);
+ native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
+ DCHECK_NOT_NULL(native_handle_);
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 6695bf8e57..3529d55875 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -38,7 +38,7 @@ int64_t ComputeThreadTicks() {
THREAD_BASIC_INFO,
reinterpret_cast<thread_info_t>(&thread_info_data),
&thread_info_count);
- CHECK(kr == KERN_SUCCESS);
+ CHECK_EQ(kr, KERN_SUCCESS);
v8::base::CheckedNumeric<int64_t> absolute_micros(
thread_info_data.user_time.seconds +
@@ -195,7 +195,7 @@ TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
struct mach_timespec TimeDelta::ToMachTimespec() const {
struct mach_timespec ts;
- DCHECK(delta_ >= 0);
+ DCHECK_GE(delta_, 0);
ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
Time::kNanosecondsPerMicrosecond;
@@ -316,7 +316,7 @@ Time Time::FromFiletime(FILETIME ft) {
FILETIME Time::ToFiletime() const {
- DCHECK(us_ >= 0);
+ DCHECK_GE(us_, 0);
FILETIME ft;
if (IsNull()) {
ft.dwLowDateTime = 0;
@@ -338,7 +338,7 @@ FILETIME Time::ToFiletime() const {
Time Time::Now() {
struct timeval tv;
- int result = gettimeofday(&tv, NULL);
+ int result = gettimeofday(&tv, nullptr);
DCHECK_EQ(0, result);
USE(result);
return FromTimeval(tv);
@@ -351,8 +351,8 @@ Time Time::NowFromSystemTime() {
Time Time::FromTimespec(struct timespec ts) {
- DCHECK(ts.tv_nsec >= 0);
- DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ DCHECK_GE(ts.tv_nsec, 0);
+ DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
return Time();
}
@@ -384,7 +384,7 @@ struct timespec Time::ToTimespec() const {
Time Time::FromTimeval(struct timeval tv) {
- DCHECK(tv.tv_usec >= 0);
+ DCHECK_GE(tv.tv_usec, 0);
DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
if (tv.tv_usec == 0 && tv.tv_sec == 0) {
return Time();
@@ -577,7 +577,7 @@ static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
ThreadSafeInitOnceTrait>::type high_res_tick_clock =
LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
+// static
TimeTicks TimeTicks::Now() {
// Make sure we never return 0 here.
TimeTicks ticks(tick_clock.Pointer()->Now());
@@ -585,7 +585,7 @@ TimeTicks TimeTicks::Now() {
return ticks;
}
-
+// static
TimeTicks TimeTicks::HighResolutionNow() {
// Make sure we never return 0 here.
TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index ed1751268f..25dee1c419 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -280,7 +280,7 @@ class TimeBase {
class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
- // Contains the NULL time. Use Time::Now() to get the current time.
+ // Contains the nullptr time. Use Time::Now() to get the current time.
Time() : TimeBase(0) {}
// Returns the current time. Watch out, the system might adjust its clock
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 1b6d39397e..28ff780dd3 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -33,7 +33,7 @@ int SysInfo::NumberOfProcessors() {
int mib[2] = {CTL_HW, HW_NCPU};
int ncpu = 0;
size_t len = sizeof(ncpu);
- if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
+ if (sysctl(mib, arraysize(mib), &ncpu, &len, nullptr, 0) != 0) {
return 1;
}
return ncpu;
@@ -57,15 +57,15 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
int mib[2] = {CTL_HW, HW_MEMSIZE};
int64_t memsize = 0;
size_t len = sizeof(memsize);
- if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
+ if (sysctl(mib, arraysize(mib), &memsize, &len, nullptr, 0) != 0) {
return 0;
}
return memsize;
#elif V8_OS_FREEBSD
int pages, page_size;
size_t size = sizeof(pages);
- sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
- sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, nullptr, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, nullptr, 0);
if (pages == -1 || page_size == -1) {
return 0;
}
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index a7bb290929..18850695cb 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -22,8 +22,9 @@ struct make_array_helper;
template <class Function, std::size_t... Indexes>
struct make_array_helper<Function, 0, Indexes...> {
- constexpr static auto make_array(Function f)
- -> std::array<decltype(f(std::size_t{0})), sizeof...(Indexes) + 1> {
+ constexpr static std::array<typename std::result_of<Function(size_t)>::type,
+ sizeof...(Indexes) + 1>
+ make_array(Function f) {
return {{f(0), f(Indexes)...}};
}
};
@@ -41,8 +42,8 @@ struct make_array_helper<Function, FirstIndex, Indexes...>
// [](std::size_t i) { return static_cast<int>(2 * i); });
// The resulting array will be constexpr if the passed function is constexpr.
template <std::size_t Size, class Function>
-constexpr auto make_array(Function f)
- -> std::array<decltype(f(std::size_t{0})), Size> {
+constexpr std::array<typename std::result_of<Function(size_t)>::type, Size>
+make_array(Function f) {
static_assert(Size > 0, "Can only create non-empty arrays");
return detail::make_array_helper<Function, Size - 1>::make_array(f);
}
@@ -93,6 +94,40 @@ struct has_output_operator {
static constexpr bool value = sizeof(__check_operator(ptr_t{nullptr})) == 1;
};
+namespace detail {
+
+template <typename Func, typename T, typename... Ts>
+struct fold_helper {
+ static_assert(sizeof...(Ts) == 0, "this is the base case");
+ using result_t = typename std::remove_reference<T>::type;
+ static constexpr T&& fold(Func func, T&& first) {
+ return std::forward<T>(first);
+ }
+};
+
+template <typename Func, typename T1, typename T2, typename... Ts>
+struct fold_helper<Func, T1, T2, Ts...> {
+ using folded_t = typename std::result_of<Func(T1, T2)>::type;
+ using next_fold_helper = fold_helper<Func, folded_t&&, Ts...>;
+ using result_t = typename next_fold_helper::result_t;
+ static constexpr result_t fold(Func func, T1&& first, T2&& second,
+ Ts&&... more) {
+ return next_fold_helper::fold(
+ func, func(std::forward<T1>(first), std::forward<T2>(second)),
+ std::forward<Ts>(more)...);
+ }
+};
+
+} // namespace detail
+
+// Fold all arguments from left to right with a given function.
+template <typename Func, typename... Ts>
+constexpr auto fold(Func func, Ts&&... more) ->
+ typename detail::fold_helper<Func, Ts...>::result_t {
+ return detail::fold_helper<Func, Ts...>::fold(func,
+ std::forward<Ts>(more)...);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 842b36a1a0..86c3694feb 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -7,6 +7,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <algorithm>
#include <new>
#include "src/base/bits.h"
@@ -18,8 +19,7 @@ namespace v8 {
namespace base {
static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-static RandomNumberGenerator::EntropySource entropy_source = NULL;
-
+static RandomNumberGenerator::EntropySource entropy_source = nullptr;
// static
void RandomNumberGenerator::SetEntropySource(EntropySource source) {
@@ -31,7 +31,7 @@ void RandomNumberGenerator::SetEntropySource(EntropySource source) {
RandomNumberGenerator::RandomNumberGenerator() {
// Check if embedder supplied an entropy source.
{ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
- if (entropy_source != NULL) {
+ if (entropy_source != nullptr) {
int64_t seed;
if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
sizeof(seed))) {
@@ -53,7 +53,7 @@ RandomNumberGenerator::RandomNumberGenerator() {
#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");
- if (fp != NULL) {
+ if (fp != nullptr) {
int64_t seed;
size_t n = fread(&seed, sizeof(seed), 1, fp);
fclose(fp);
@@ -115,6 +115,85 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
}
}
+static std::vector<uint64_t> ComplementSample(
+ const std::unordered_set<uint64_t>& set, uint64_t max) {
+ std::vector<uint64_t> result;
+ result.reserve(max - set.size());
+ for (uint64_t i = 0; i < max; i++) {
+ if (!set.count(i)) {
+ result.push_back(i);
+ }
+ }
+ return result;
+}
+
+std::vector<uint64_t> RandomNumberGenerator::NextSample(uint64_t max,
+ size_t n) {
+ CHECK_LE(n, max);
+
+ if (n == 0) {
+ return std::vector<uint64_t>();
+ }
+
+ // Choose to select or exclude, whatever needs fewer generator calls.
+ size_t smaller_part = static_cast<size_t>(
+ std::min(max - static_cast<uint64_t>(n), static_cast<uint64_t>(n)));
+ std::unordered_set<uint64_t> selected;
+
+ size_t counter = 0;
+ while (selected.size() != smaller_part && counter / 3 < smaller_part) {
+ uint64_t x = static_cast<uint64_t>(NextDouble() * max);
+ CHECK_LT(x, max);
+
+ selected.insert(x);
+ counter++;
+ }
+
+ if (selected.size() == smaller_part) {
+ if (smaller_part != n) {
+ return ComplementSample(selected, max);
+ }
+ return std::vector<uint64_t>(selected.begin(), selected.end());
+ }
+
+ // Failed to select numbers in smaller_part * 3 steps, try a different approach.
+ return NextSampleSlow(max, n, selected);
+}
+
+std::vector<uint64_t> RandomNumberGenerator::NextSampleSlow(
+ uint64_t max, size_t n, const std::unordered_set<uint64_t>& excluded) {
+ CHECK_GE(max - excluded.size(), n);
+
+ std::vector<uint64_t> result;
+ result.reserve(max - excluded.size());
+
+ for (uint64_t i = 0; i < max; i++) {
+ if (!excluded.count(i)) {
+ result.push_back(i);
+ }
+ }
+
+ // Decrease result vector until it contains values to select or exclude,
+ // whatever needs fewer generator calls.
+ size_t larger_part = static_cast<size_t>(
+ std::max(max - static_cast<uint64_t>(n), static_cast<uint64_t>(n)));
+
+ // The excluded set may cause the initial result to already be smaller
+ // than larger_part.
+ while (result.size() != larger_part && result.size() > n) {
+ size_t x = static_cast<size_t>(NextDouble() * result.size());
+ CHECK_LT(x, result.size());
+
+ std::swap(result[x], result.back());
+ result.pop_back();
+ }
+
+ if (result.size() != n) {
+ return ComplementSample(
+ std::unordered_set<uint64_t>(result.begin(), result.end()), max);
+ }
+ return result;
+}
int RandomNumberGenerator::Next(int bits) {
DCHECK_LT(0, bits);
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 7a322b5332..285c5972e0 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -5,6 +5,9 @@
#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#include <unordered_set>
+#include <vector>
+
#include "src/base/base-export.h"
#include "src/base/macros.h"
@@ -85,6 +88,23 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);
+ // Returns the next pseudorandom set of n unique uint64 values smaller than
+ // max.
+ // n must be less or equal to max.
+ std::vector<uint64_t> NextSample(uint64_t max, size_t n) WARN_UNUSED_RESULT;
+
+ // Returns the next pseudorandom set of n unique uint64 values smaller than
+ // max.
+ // n must be less or equal to max.
+ // max - |excluded| must be greater than or equal to n (the implementation
+ // asserts CHECK_GE(max - excluded.size(), n)).
+ //
+ // Generates list of all possible values and removes random values from it
+ // until size reaches n.
+ std::vector<uint64_t> NextSampleSlow(
+ uint64_t max, size_t n,
+ const std::unordered_set<uint64_t>& excluded =
+ std::unordered_set<uint64_t>{}) WARN_UNUSED_RESULT;
+
// Override the current seed.
void SetSeed(int64_t seed);