author    Kostya Serebryany <kcc@google.com>  2016-11-10 17:27:28 +0000
committer Kostya Serebryany <kcc@google.com>  2016-11-10 17:27:28 +0000
commit    39d94072e69f98f023eda1b3093851665c01e43b
tree      9423a0522435bf62085f1fe5ea4111b0143987fe /lib/sanitizer_common/sanitizer_allocator_secondary.h
parent    f2de077dc7d89466b630c308ed8eaf6bb4915210
[lsan] Fix a rare lsan false positive: ensure that we don't re-sort the chunks_ array while iterating over it. A test is hard to create, but I've added a consistency check that fires without the fix on existing tests. The bug analysis and the initial patch were provided by Pierre Bourdon.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@286478 91177308-0d34-0410-b5e6-96231b3b80d8
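
The failure mode, in miniature: GetBlockBeginFastLocked used to perform a lazy one-time sort of chunks_, so a ForEachChunk callback that reached it could reshuffle the array mid-iteration, making the index-based loop skip some chunks and visit others twice. A skipped chunk is never scanned for pointers, which is what produced the rare leak false positive. Below is a minimal standalone sketch of the hazard; the names are hypothetical and a std::vector stands in for the chunk array, this is not the sanitizer code itself.

// Hazard sketch (hypothetical names, not the sanitizer code): a lazily
// sorting lookup invoked from inside an index-based loop over the same
// array makes the loop skip and repeat elements.
#include <algorithm>
#include <cstdio>
#include <vector>

static std::vector<int> chunks = {30, 10, 20};
static bool sorted = false;

// Stands in for the old GetBlockBeginFastLocked: sorts on first use.
static void LazySortingLookup() {
  if (sorted) return;
  std::sort(chunks.begin(), chunks.end());
  sorted = true;
}

int main() {
  // Stands in for the old ForEachChunk: the work done after each visit
  // triggers the lazy sort and reshuffles the array under the loop.
  for (size_t i = 0; i < chunks.size(); i++) {
    std::printf("visiting %d\n", chunks[i]);
    LazySortingLookup();  // an lsan callback could reach this indirectly
  }
  // Prints 30, 20, 30: element 10 is never visited and 30 is visited
  // twice. In lsan terms, the skipped chunk is never scanned, so memory
  // it references can be reported as leaked.
  return 0;
}

The patch removes the hazard by hoisting the sort into EnsureSortedChunks() and calling it eagerly at the top of ForEachChunk, so the iteration order is stable; the CHECK_EQ pair then turns any future regression into a hard failure instead of a silent false positive.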
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_secondary.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h | 31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 64694edc5..2e98e591b 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -161,6 +161,14 @@ class LargeMmapAllocator {
     return GetUser(h);
   }
 
+  void EnsureSortedChunks() {
+    if (chunks_sorted_) return;
+    SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+    for (uptr i = 0; i < n_chunks_; i++)
+      chunks_[i]->chunk_idx = i;
+    chunks_sorted_ = true;
+  }
+
   // This function does the same as GetBlockBegin, but is much faster.
   // Must be called with the allocator locked.
   void *GetBlockBeginFastLocked(void *ptr) {
@@ -168,16 +176,10 @@
     uptr p = reinterpret_cast<uptr>(ptr);
     uptr n = n_chunks_;
     if (!n) return nullptr;
-    if (!chunks_sorted_) {
-      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
-      SortArray(reinterpret_cast<uptr*>(chunks_), n);
-      for (uptr i = 0; i < n; i++)
-        chunks_[i]->chunk_idx = i;
-      chunks_sorted_ = true;
-      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
-      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
-                  chunks_[n - 1]->map_size;
-    }
+    EnsureSortedChunks();
+    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+    auto max_mmap_ =
+        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
     if (p < min_mmap_ || p >= max_mmap_)
       return nullptr;
     uptr beg = 0, end = n - 1;
@@ -230,8 +232,14 @@
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-    for (uptr i = 0; i < n_chunks_; i++)
+    EnsureSortedChunks();  // Avoid doing the sort while iterating.
+    for (uptr i = 0; i < n_chunks_; i++) {
+      auto t = chunks_[i];
       callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+      // Consistency check: verify that the array did not change.
+      CHECK_EQ(chunks_[i], t);
+      CHECK_EQ(chunks_[i]->chunk_idx, i);
+    }
   }
 
  private:
@@ -263,7 +271,6 @@
   uptr page_size_;
   Header *chunks_[kMaxNumChunks];
   uptr n_chunks_;
-  uptr min_mmap_, max_mmap_;
   bool chunks_sorted_;
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
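
For context on why the sorted order matters beyond iteration: once EnsureSortedChunks() has run, GetBlockBeginFastLocked can binary-search for the chunk whose mapping contains a pointer, which is what makes it faster than GetBlockBegin. The following is a simplified, self-contained sketch of that idea; the types and names are mine, not the upstream code.

#include <cstdint>

using uptr = std::uintptr_t;

// Simplified stand-in for the secondary allocator's chunk header; the
// header sits at the start of its mmap region, so the header pointer
// doubles as the region's base address.
struct Header { uptr map_size; };

// Find the chunk whose mapping contains p, assuming `chunks` is sorted
// by address (what EnsureSortedChunks() guarantees). A sketch of the
// idea behind GetBlockBeginFastLocked, not the exact upstream code.
Header *FindContainingChunk(Header **chunks, uptr n, uptr p) {
  if (!n) return nullptr;
  // Range check mirroring the min_mmap_/max_mmap_ locals in the patch.
  uptr min_mmap = reinterpret_cast<uptr>(chunks[0]);
  uptr max_mmap =
      reinterpret_cast<uptr>(chunks[n - 1]) + chunks[n - 1]->map_size;
  if (p < min_mmap || p >= max_mmap) return nullptr;
  // Binary search for the last chunk starting at or below p.
  uptr beg = 0, end = n - 1;
  while (beg < end) {
    uptr mid = beg + (end - beg + 1) / 2;  // round up so the loop terminates
    if (reinterpret_cast<uptr>(chunks[mid]) <= p)
      beg = mid;
    else
      end = mid - 1;
  }
  Header *h = chunks[beg];
  uptr h_beg = reinterpret_cast<uptr>(h);
  return (p >= h_beg && p < h_beg + h->map_size) ? h : nullptr;
}

The up-front range check against the first and last chunk mirrors the min_mmap_/max_mmap_ values the patch now recomputes as locals after sorting, which is also why the member variables of the same name could be deleted.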