//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Maps integers in range [0, kSize) to u8 values. Each value is initially
// zero and can be set to something else only once.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};
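
// Usage sketch (illustrative only, not part of this header): a small
// FlatByteMap where each index is written at most once and unset indices
// read back as zero.
//
//   FlatByteMap<1024> size_class_map;     // hypothetical instance
//   size_class_map.TestOnlyInit();        // zero-fills the backing array
//   size_class_map.set(42, 7);            // first (and only) store to index 42
//   u8 v = size_class_map[42];            // v == 7; untouched indices yield 0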

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmapped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }

  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }

  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }

 private:
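  // Acquire-loads the level-2 array pointer for level-1 slot idx; returns
  // null if that slot has not been created yet. Pairs with the release store
  // in GetOrCreate(), so a non-null result is safe to read through.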
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

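  // Returns the level-2 array for level-1 slot idx, mmapping it on first
  // use. Double-checked locking: the common path is a lock-free acquire
  // load; only a thread that finds the slot empty takes mu_ and re-checks
  // under the lock, so each level-2 array is created exactly once.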
  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }

  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};
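
// Usage sketch (illustrative only, not part of this header): a map covering
// kSize1 * kSize2 = 128 * 4096 indices, where level-2 arrays are mmapped
// lazily on first set() and concurrent readers need no extra locking.
//
//   TwoLevelByteMap<128, 4096> byte_map;  // hypothetical instance
//   byte_map.TestOnlyInit();
//   byte_map.set(5000, 3);      // slot 5000/4096 == 1, offset 5000%4096 == 904
//   u8 v = byte_map[5000];      // v == 3; indices in unmapped slots yield 0
//   byte_map.TestOnlyUnmap();   // releases all lazily created level-2 arrays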