aboutsummaryrefslogtreecommitdiff
path: root/deps/v8/src/ic/stub-cache.h
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2019-11-08 15:39:11 +0100
committerMichaël Zasso <targos@protonmail.com>2019-11-08 15:46:25 +0100
commit6ca81ad72a3c6fdf16c683335be748f22aaa9a0d (patch)
tree33c8ee75f729aed76c2c0b89c63f9bf1b4dd66aa /deps/v8/src/ic/stub-cache.h
parent1eee0b8bf8bba39b600fb16a9223e545e3bac2bc (diff)
downloadandroid-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.tar.gz
android-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.tar.bz2
android-node-v8-6ca81ad72a3c6fdf16c683335be748f22aaa9a0d.zip
deps: update V8 to 7.9.317.20
PR-URL: https://github.com/nodejs/node/pull/30020 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Anna Henningsen <anna@addaleax.net> Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/ic/stub-cache.h')
-rw-r--r--deps/v8/src/ic/stub-cache.h18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 87acc0e007..dc3317588d 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -78,10 +78,15 @@ class V8_EXPORT_PRIVATE StubCache {
Isolate* isolate() { return isolate_; }
- // Setting the entry size such that the index is shifted by Name::kHashShift
- // is convenient; shifting down the length field (to extract the hash code)
- // automatically discards the hash bit field.
- static const int kCacheIndexShift = Name::kHashShift;
+ // Ideally we would set kCacheIndexShift to Name::kHashShift, such that
+ // the bit field inside the hash field gets shifted out implicitly. However,
+ // sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift, and it
+ // isn't clear whether letting one bit of the bit field leak into the index
+ // computation is bad enough to warrant an additional shift to get rid of it.
+ static const int kCacheIndexShift = 2;
+ // The purpose of the static assert is to make us reconsider this choice
+ // if the bit field ever grows even more.
+ STATIC_ASSERT(kCacheIndexShift == Name::kHashShift - 1);
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
@@ -125,7 +130,10 @@ class V8_EXPORT_PRIVATE StubCache {
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> Name::kHashShift;
+ // The size of {Entry} must be a multiple of 1 << kCacheIndexShift.
+ STATIC_ASSERT((sizeof(*table) >> kCacheIndexShift) << kCacheIndexShift ==
+ sizeof(*table));
+ const int multiplier = sizeof(*table) >> kCacheIndexShift;
return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
offset * multiplier);
}