summary refs log tree commit diff
path: root/lib/xray/xray_segmented_array.h
diff options
context:
space:
mode:
Diffstat (limited to 'lib/xray/xray_segmented_array.h')
-rw-r--r-- lib/xray/xray_segmented_array.h | 149
1 files changed, 89 insertions(+), 60 deletions(-)
diff --git a/lib/xray/xray_segmented_array.h b/lib/xray/xray_segmented_array.h
index 3205a9697..ddfdfb3c7 100644
--- a/lib/xray/xray_segmented_array.h
+++ b/lib/xray/xray_segmented_array.h
@@ -18,21 +18,13 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "xray_allocator.h"
+#include "xray_utils.h"
+#include <cassert>
#include <type_traits>
#include <utility>
namespace __xray {
-namespace {
-
-constexpr size_t gcd(size_t a, size_t b) {
- return (b == 0) ? a : gcd(b, a % b);
-}
-
-constexpr size_t lcm(size_t a, size_t b) { return a * b / gcd(a, b); }
-
-} // namespace
-
/// The Array type provides an interface similar to std::vector<...> but does
/// not shrink in size. Once constructed, elements can be appended but cannot be
/// removed. The implementation is heavily dependent on the contract provided by
@@ -45,10 +37,12 @@ constexpr size_t lcm(size_t a, size_t b) { return a * b / gcd(a, b); }
/// size, to allow us to maximise the number of T objects we can place in
/// cache-line multiple sized blocks. To get back the number of T's, we divide
/// this least common multiple by the size of T.
-template <class T, size_t N = lcm(sizeof(T), kCacheLineSize) / sizeof(T)>
+template <class T,
+ size_t N = lcm(next_pow2(sizeof(T)), kCacheLineSize) / sizeof(T)>
struct Array {
static constexpr size_t ChunkSize = N;
- static constexpr size_t AllocatorChunkSize = sizeof(T) * ChunkSize;
+ static constexpr size_t ElementStorageSize = next_pow2(sizeof(T));
+ static constexpr size_t AllocatorChunkSize = ElementStorageSize * ChunkSize;
using AllocatorType = Allocator<AllocatorChunkSize>;
static_assert(std::is_trivially_destructible<T>::value,
"T must be trivially destructible.");
@@ -60,8 +54,8 @@ private:
struct Chunk {
typename AllocatorType::Block Block;
static constexpr size_t Size = N;
- Chunk *Prev = nullptr;
- Chunk *Next = nullptr;
+ Chunk *Prev = this;
+ Chunk *Next = this;
};
static Chunk SentinelChunk;
@@ -70,7 +64,6 @@ private:
Chunk *Head = &SentinelChunk;
Chunk *Tail = &SentinelChunk;
size_t Size = 0;
- size_t FreeElements = 0;
// Here we keep track of chunks in the freelist, to allow us to re-use chunks
// when elements are trimmed off the end.
@@ -85,17 +78,22 @@ private:
auto *FreeChunk = Freelist;
Freelist = FreeChunk->Next;
FreeChunk->Next = &SentinelChunk;
+ Freelist->Prev = &SentinelChunk;
return FreeChunk;
}
- auto Block = Alloc->Allocate();
+ auto Block = Alloc->Allocate(ElementStorageSize);
if (Block.Data == nullptr)
return nullptr;
+
// TODO: Maybe use a separate managed allocator for Chunk instances?
- auto C = reinterpret_cast<Chunk *>(InternalAlloc(sizeof(Chunk)));
+ auto C = reinterpret_cast<Chunk *>(
+ InternalAlloc(sizeof(Chunk), nullptr, kCacheLineSize));
if (C == nullptr)
return nullptr;
C->Block = Block;
+ C->Prev = &SentinelChunk;
+ C->Next = &SentinelChunk;
return C;
}
@@ -116,42 +114,52 @@ private:
auto Chunk = NewChunk();
if (Chunk == nullptr)
return nullptr;
- Chunk->Prev = &SentinelChunk;
- Chunk->Next = &SentinelChunk;
- Head = Chunk;
- Tail = Chunk;
- return Chunk;
+ DCHECK_EQ(Chunk->Next, &SentinelChunk);
+ DCHECK_EQ(Chunk->Prev, &SentinelChunk);
+ return Head = Tail = Chunk;
}
Chunk *AppendNewChunk() {
auto Chunk = NewChunk();
if (Chunk == nullptr)
return nullptr;
+ DCHECK_NE(Tail, &SentinelChunk);
+ DCHECK_EQ(Tail->Next, &SentinelChunk);
+ DCHECK_EQ(Chunk->Prev, &SentinelChunk);
+ DCHECK_EQ(Chunk->Next, &SentinelChunk);
Tail->Next = Chunk;
Chunk->Prev = Tail;
- Chunk->Next = &SentinelChunk;
Tail = Chunk;
- return Chunk;
+ return Tail;
}
// This Iterator models a BidirectionalIterator.
template <class U> class Iterator {
- Chunk *C = nullptr;
+ Chunk *C = &SentinelChunk;
size_t Offset = 0;
+ size_t Size = 0;
public:
- Iterator(Chunk *IC, size_t Off) : C(IC), Offset(Off) {}
+ Iterator(Chunk *IC, size_t Off, size_t S) : C(IC), Offset(Off), Size(S) {}
+ Iterator(const Iterator &) noexcept = default;
+ Iterator() noexcept = default;
+ Iterator(Iterator &&) noexcept = default;
+ Iterator &operator=(const Iterator &) = default;
+ Iterator &operator=(Iterator &&) = default;
+ ~Iterator() = default;
Iterator &operator++() {
- if (++Offset % N)
+ if (++Offset % N || Offset == Size)
return *this;
- DCHECK_NE(C, &SentinelChunk);
-
// At this point, we know that Offset % N == 0, so we must advance the
// chunk pointer.
DCHECK_EQ(Offset % N, 0);
+ DCHECK_NE(Offset, Size);
+ DCHECK_NE(C, &SentinelChunk);
+ DCHECK_NE(C->Next, &SentinelChunk);
C = C->Next;
+ DCHECK_NE(C, &SentinelChunk);
return *this;
}
@@ -159,10 +167,12 @@ private:
DCHECK_NE(C, &SentinelChunk);
DCHECK_GT(Offset, 0);
- // We check whether the offset was on a boundary before decrement, to see
- // whether we need to retreat to the previous chunk.
- if ((Offset-- % N) == 0)
+ auto PreviousOffset = Offset--;
+ if (PreviousOffset != Size && PreviousOffset % N == 0) {
+ DCHECK_NE(C->Prev, &SentinelChunk);
C = C->Prev;
+ }
+
return *this;
}
@@ -190,12 +200,16 @@ private:
U &operator*() const {
DCHECK_NE(C, &SentinelChunk);
- return reinterpret_cast<U *>(C->Block.Data)[Offset % N];
+ auto RelOff = Offset % N;
+ return *reinterpret_cast<U *>(reinterpret_cast<char *>(C->Block.Data) +
+ (RelOff * ElementStorageSize));
}
U *operator->() const {
DCHECK_NE(C, &SentinelChunk);
- return reinterpret_cast<U *>(C->Block.Data) + (Offset % N);
+ auto RelOff = Offset % N;
+ return reinterpret_cast<U *>(reinterpret_cast<char *>(C->Block.Data) +
+ (RelOff * ElementStorageSize));
}
};
@@ -232,10 +246,11 @@ public:
if (AppendNewChunk() == nullptr)
return nullptr;
- auto Position = reinterpret_cast<T *>(Tail->Block.Data) + Offset;
+ auto Position =
+ reinterpret_cast<T *>(reinterpret_cast<char *>(Tail->Block.Data) +
+ (Offset * ElementStorageSize));
*Position = E;
++Size;
- FreeElements -= FreeElements ? 1 : 0;
return Position;
}
@@ -245,16 +260,21 @@ public:
return nullptr;
auto Offset = Size % N;
- if (UNLIKELY(Size != 0 && Offset == 0))
- if (AppendNewChunk() == nullptr)
+ auto *LatestChunk = Tail;
+ if (UNLIKELY(Size != 0 && Offset == 0)) {
+ LatestChunk = AppendNewChunk();
+ if (LatestChunk == nullptr)
return nullptr;
+ }
- auto Position = reinterpret_cast<T *>(Tail->Block.Data) + Offset;
+ DCHECK_NE(Tail, &SentinelChunk);
+ auto Position = reinterpret_cast<char *>(LatestChunk->Block.Data) +
+ (Offset * ElementStorageSize);
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(Position) % ElementStorageSize, 0);
// In-place construct at Position.
- new (Position) T(std::forward<Args>(args)...);
+ new (Position) T{std::forward<Args>(args)...};
++Size;
- FreeElements -= FreeElements ? 1 : 0;
- return Position;
+ return reinterpret_cast<T *>(Position);
}
T &operator[](size_t Offset) const {
@@ -266,20 +286,22 @@ public:
Offset -= N;
DCHECK_NE(C, &SentinelChunk);
}
- auto Position = reinterpret_cast<T *>(C->Block.Data) + Offset;
- return *Position;
+ return *reinterpret_cast<T *>(reinterpret_cast<char *>(C->Block.Data) +
+ (Offset * ElementStorageSize));
}
T &front() const {
DCHECK_NE(Head, &SentinelChunk);
DCHECK_NE(Size, 0u);
- return *reinterpret_cast<T *>(Head->Block.Data);
+ return *begin();
}
T &back() const {
DCHECK_NE(Tail, &SentinelChunk);
- auto Offset = (Size - 1) % N;
- return *(reinterpret_cast<T *>(Tail->Block.Data) + Offset);
+ DCHECK_NE(Size, 0u);
+ auto It = end();
+ --It;
+ return *It;
}
template <class Predicate> T *find_element(Predicate P) const {
@@ -298,15 +320,18 @@ public:
/// require allocation of new blocks for new elements added after trimming.
void trim(size_t Elements) {
DCHECK_LE(Elements, Size);
+ DCHECK_GT(Size, 0);
+ auto OldSize = Size;
Size -= Elements;
- FreeElements += Elements;
-
- // Here we need to check whether we've cleared enough elements to warrant
- // putting blocks on to the freelist. We determine whether we need to
- // right-size the internal list, by keeping track of the number of "free"
- // elements still in the array.
- auto ChunksToTrim = FreeElements / N;
- for (size_t i = 0; i < ChunksToTrim; ++i, FreeElements -= N) {
+
+ DCHECK_NE(Head, &SentinelChunk);
+ DCHECK_NE(Tail, &SentinelChunk);
+
+ for (auto ChunksToTrim =
+ (nearest_boundary(OldSize, N) - nearest_boundary(Size, N)) / N;
+ ChunksToTrim > 0; --ChunksToTrim) {
+ DCHECK_NE(Head, &SentinelChunk);
+ DCHECK_NE(Tail, &SentinelChunk);
// Put the tail into the Freelist.
auto *FreeChunk = Tail;
Tail = Tail->Prev;
@@ -314,17 +339,21 @@ public:
Head = Tail;
else
Tail->Next = &SentinelChunk;
+
+ DCHECK_EQ(Tail->Next, &SentinelChunk);
FreeChunk->Next = Freelist;
- FreeChunk->Prev = Freelist->Prev;
+ FreeChunk->Prev = &SentinelChunk;
+ if (Freelist != &SentinelChunk)
+ Freelist->Prev = FreeChunk;
Freelist = FreeChunk;
}
}
// Provide iterators.
- Iterator<T> begin() const { return Iterator<T>(Head, 0); }
- Iterator<T> end() const { return Iterator<T>(Tail, Size); }
- Iterator<const T> cbegin() const { return Iterator<const T>(Head, 0); }
- Iterator<const T> cend() const { return Iterator<const T>(Tail, Size); }
+ Iterator<T> begin() const { return Iterator<T>(Head, 0, Size); }
+ Iterator<T> end() const { return Iterator<T>(Tail, Size, Size); }
+ Iterator<const T> cbegin() const { return Iterator<const T>(Head, 0, Size); }
+ Iterator<const T> cend() const { return Iterator<const T>(Tail, Size, Size); }
};
// We need to have this storage definition out-of-line so that the compiler can