//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
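
// A front-end can observe allocations by linking in strong definitions of
// these hooks, e.g. (hypothetical sketch; uptr is an unsigned
// pointer-sized integer):
//
//   extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
//     // record the allocation in application-specific bookkeeping
//   }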

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

// The first word of MBlock doubles as a spin lock: bit 0 is the lock bit.
// Lock() spins, backing off with proc_yield() and then
// internal_sched_yield(), until an acquire CAS sets the bit.
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  // Release pairs with the acquire in Lock() so that writes made under
  // the lock are visible to the next owner.
  atomic_store(a, v & ~(uptr)1, memory_order_release);
}
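
// Usage sketch: the lock protects the block's list of sync objects, e.g.
//   MBlock::ScopedLock l(b);  // b is an MBlock*
//   for (SyncVar *s = b->ListHead(); s; s = s->next) { ... }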

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

// Storage for the allocator; initialized explicitly in InitializeAllocator()
// so the runtime does not depend on global constructors.
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

// Reports a malloc/free performed inside a signal handler
// (if report_signal_unsafe is set).
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return 0;  // reject implausibly large requests
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
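
// Typical call sequence from a malloc interceptor (sketch; the real
// interceptors live in tsan_interceptors.cc):
//   void *p = user_alloc(thr, pc, size);
//   invoke_malloc_hook(p, size);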

void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  // Destroy sync objects (mutexes, etc.) that reside in the block being freed.
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      // Cycle the sync object's mutex to make sure no other thread is
      // still using it before it is destroyed.
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  // in_rtl == 1 means the free comes directly from user code,
  // not from an allocation made internally by the runtime.
  if (CTX() && CTX()->initialized && thr->in_rtl == 1)
    MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

// Note: user_realloc(thr, pc, 0, sz) acts as malloc(sz), and
// user_realloc(thr, pc, p, 0) frees p and returns 0.
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle shrinking requests without reallocating;
  // some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}
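
// Note: GetBlockBegin() maps an interior pointer to the start of its
// enclosing heap block, so user_mblock() works for any pointer into a
// live allocation, not just the pointer returned by the allocator.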

void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}
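
// The hooks above are not invoked for allocations performed by the runtime
// itself (thr->in_rtl != 0) or before the runtime is fully initialized.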

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // Reset first: reporting the CHECK failure allocates.
    CHECK(0);
  }
  return InternalAlloc(sz);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // Reset first: reporting the CHECK failure allocates.
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;  // guard against transient underflow in racy stats
}

uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_free_bytes() {
  return 1;  // Not implemented.
}

uptr __tsan_get_unmapped_bytes() {
  return 1;  // Not implemented.
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

// Called when a thread goes idle; returns the thread's allocator cache
// to the central allocator.
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
}
}  // extern "C"