author    Vedant Kumar <vsk@apple.com>  2015-10-01 00:22:21 +0000
committer Vedant Kumar <vsk@apple.com>  2015-10-01 00:22:21 +0000
commit    e05b308c2300c260e9494c52b972934fc89d5f6d (patch)
tree      5125ad237a8d757db22de3090fdc83be5a30613e /lib/msan
parent    98379eaa5baabd991a6c353cbdd61b539bb369b4 (diff)
[compiler-rt] Apply modernize-use-nullptr fixes in sanitizers
- Trim spaces.
- Use nullptr in place of 0 for pointer variables.
- Use '!p' in place of 'p == 0' for null pointer checks.
- Add blank lines to separate function definitions.
- Add 'extern "C"' or 'namespace foo' comments after the appropriate closing brackets.

This is a continuation of work from 409b7b82. The focus here is on the various sanitizers (not sanitizer_common, as before).

Patch by Eugene Zelenko!

Differential Revision: http://reviews.llvm.org/D13225

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@248966 91177308-0d34-0410-b5e6-96231b3b80d8
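For reference, a minimal before/after sketch of the kind of rewrite this patch applies across lib/msan; the namespace and function below are hypothetical examples, not code from the patch:

// Before: the integer literal 0 stands in for a null pointer, and the null
// check is written as a comparison against 0.
namespace demo {
char *normalize(char *p) {
  if (p == 0) return 0;
  return p;
}
}

// After: nullptr is used for pointer values, '!p' for the null check, and the
// namespace's closing bracket carries an identifying comment.
namespace demo {
char *normalize(char *p) {
  if (!p) return nullptr;
  return p;
}
}  // namespace demo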
Diffstat (limited to 'lib/msan')
-rw-r--r--  lib/msan/msan.cc                       10
-rw-r--r--  lib/msan/msan_allocator.cc              8
-rw-r--r--  lib/msan/msan_chained_origin_depot.cc  10
-rw-r--r--  lib/msan/msan_interceptors.cc          32
-rw-r--r--  lib/msan/msan_linux.cc                  6
-rw-r--r--  lib/msan/msan_thread.h                  2
6 files changed, 37 insertions, 31 deletions
diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc
index 12a141bf4..87f2cdf2e 100644
--- a/lib/msan/msan.cc
+++ b/lib/msan/msan.cc
@@ -223,9 +223,9 @@ void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
if (!t || !StackTrace::WillUseFastUnwind(request_fast_unwind)) {
// Block reports from our interceptors during _Unwind_Backtrace.
SymbolizerScope sym_scope;
- return stack->Unwind(max_s, pc, bp, 0, 0, 0, request_fast_unwind);
+ return stack->Unwind(max_s, pc, bp, nullptr, 0, 0, request_fast_unwind);
}
- stack->Unwind(max_s, pc, bp, 0, t->stack_top(), t->stack_bottom(),
+ stack->Unwind(max_s, pc, bp, nullptr, t->stack_top(), t->stack_bottom(),
request_fast_unwind);
}
@@ -305,7 +305,7 @@ u32 ChainOrigin(u32 id, StackTrace *stack) {
return chained.raw_id();
}
-} // namespace __msan
+}  // namespace __msan
// Interface.
@@ -417,7 +417,7 @@ void __msan_init() {
MsanAllocatorInit();
- MsanThread *main_thread = MsanThread::Create(0, 0);
+ MsanThread *main_thread = MsanThread::Create(nullptr, nullptr);
SetCurrentThread(main_thread);
main_thread->ThreadStart();
@@ -641,4 +641,4 @@ void __sanitizer_print_stack_trace() {
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
stack.Print();
}
-} // extern "C"
+}  // extern "C"
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 006d993f0..865a4586b 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -165,7 +165,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
- return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
+ return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
@@ -174,7 +174,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return MsanAllocate(stack, new_size, alignment, zeroise);
if (!new_size) {
MsanDeallocate(stack, old_p);
- return 0;
+ return nullptr;
}
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
@@ -204,14 +204,14 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
}
static uptr AllocationSize(const void *p) {
- if (p == 0) return 0;
+ if (!p) return 0;
const void *beg = allocator.GetBlockBegin(p);
if (beg != p) return 0;
Metadata *b = (Metadata *)allocator.GetMetaData(p);
return b->requested_size;
}
-} // namespace __msan
+}  // namespace __msan
using namespace __msan;
diff --git a/lib/msan/msan_chained_origin_depot.cc b/lib/msan/msan_chained_origin_depot.cc
index c21e8e827..e2796fd46 100644
--- a/lib/msan/msan_chained_origin_depot.cc
+++ b/lib/msan/msan_chained_origin_depot.cc
@@ -28,12 +28,15 @@ struct ChainedOriginDepotNode {
u32 prev_id;
typedef ChainedOriginDepotDesc args_type;
+
bool eq(u32 hash, const args_type &args) const {
return here_id == args.here_id && prev_id == args.prev_id;
}
+
static uptr storage_size(const args_type &args) {
return sizeof(ChainedOriginDepotNode);
}
+
/* This is murmur2 hash for the 64->32 bit case.
It does not behave all that well because the keys have a very biased
distribution (I've seen 7-element buckets with the table only 14% full).
@@ -76,19 +79,22 @@ struct ChainedOriginDepotNode {
here_id = args.here_id;
prev_id = args.prev_id;
}
+
args_type load() const {
args_type ret = {here_id, prev_id};
return ret;
}
+
struct Handle {
ChainedOriginDepotNode *node_;
- Handle() : node_(0) {}
+ Handle() : node_(nullptr) {}
explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
bool valid() { return node_; }
u32 id() { return node_->id; }
int here_id() { return node_->here_id; }
int prev_id() { return node_->prev_id; }
};
+
Handle get_handle() { return Handle(this); }
typedef Handle handle_type;
@@ -123,4 +129,4 @@ void ChainedOriginDepotUnlockAll() {
chainedOriginDepot.UnlockAll();
}
-} // namespace __msan
+}  // namespace __msan
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index c42885815..f363c2695 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -166,7 +166,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(alignment & (alignment - 1), 0);
CHECK_NE(memptr, 0);
- *memptr = MsanReallocate(&stack, 0, size, alignment, false);
+ *memptr = MsanReallocate(&stack, nullptr, size, alignment, false);
CHECK_NE(*memptr, 0);
__msan_unpoison(memptr, sizeof(*memptr));
return 0;
@@ -176,7 +176,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, 0, size, boundary, false);
+ void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
return ptr;
}
#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
@@ -187,21 +187,21 @@ INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, 0, size, boundary, false);
+ void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
return ptr;
}
INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, 0, size, boundary, false);
+ void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
DTLS_on_libc_memalign(ptr, size * boundary);
return ptr;
}
INTERCEPTOR(void *, valloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- void *ptr = MsanReallocate(&stack, 0, size, GetPageSizeCached(), false);
+ void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false);
return ptr;
}
@@ -214,7 +214,7 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
- void *ptr = MsanReallocate(&stack, 0, size, PageSize, false);
+ void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false);
return ptr;
}
#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
@@ -224,14 +224,14 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
INTERCEPTOR(void, free, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (ptr == 0) return;
+ if (!ptr) return;
MsanDeallocate(&stack, ptr);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void, cfree, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (ptr == 0) return;
+ if (!ptr) return;
MsanDeallocate(&stack, ptr);
}
#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
@@ -1000,7 +1000,7 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- return MsanReallocate(&stack, 0, size, sizeof(u64), false);
+ return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
}
void __msan_allocated_memory(const void *data, uptr size) {
@@ -1029,7 +1029,7 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
*__errno_location() = errno_EINVAL;
return (void *)-1;
} else {
- addr = 0;
+ addr = nullptr;
}
}
void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
@@ -1047,7 +1047,7 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
*__errno_location() = errno_EINVAL;
return (void *)-1;
} else {
- addr = 0;
+ addr = nullptr;
}
}
void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
@@ -1083,7 +1083,7 @@ INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) {
INTERCEPTOR(char *, dlerror, int fake) {
ENSURE_MSAN_INITED();
char *res = REAL(dlerror)(fake);
- if (res != 0) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
return res;
}
@@ -1180,7 +1180,7 @@ INTERCEPTOR(int, sigaction, int signo, const __sanitizer_sigaction *act,
CHECK_LT(signo, kMaxSignals);
uptr old_cb = atomic_load(&sigactions[signo], memory_order_relaxed);
__sanitizer_sigaction new_act;
- __sanitizer_sigaction *pnew_act = act ? &new_act : 0;
+ __sanitizer_sigaction *pnew_act = act ? &new_act : nullptr;
if (act) {
REAL(memcpy)(pnew_act, act, sizeof(__sanitizer_sigaction));
uptr cb = (uptr)pnew_act->sigaction;
@@ -1237,7 +1237,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
void * param) {
ENSURE_MSAN_INITED(); // for GetTlsSize()
__sanitizer_pthread_attr_t myattr;
- if (attr == 0) {
+ if (!attr) {
pthread_attr_init(&myattr);
attr = &myattr;
}
@@ -1376,7 +1376,7 @@ int OnExit() {
return 0;
}
-} // namespace __msan
+}  // namespace __msan
// A version of CHECK_UNPOISONED using a saved scope value. Used in common
// interceptors.
@@ -1634,4 +1634,4 @@ void InitializeInterceptors() {
inited = 1;
}
-} // namespace __msan
+}  // namespace __msan
diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc
index 7f6cb75ff..2888eb528 100644
--- a/lib/msan/msan_linux.cc
+++ b/lib/msan/msan_linux.cc
@@ -56,7 +56,7 @@ static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
if (size > 0) {
void *addr = MmapNoAccess(beg, size, name);
- if (beg == 0 && addr != 0) {
+ if (beg == 0 && addr) {
// Depending on the kernel configuration, we may not be able to protect
// the page at address zero.
uptr gap = 16 * GetPageSizeCached();
@@ -204,6 +204,6 @@ void MsanTSDDtor(void *tsd) {
MsanThread::TSDDtor(tsd);
}
-} // namespace __msan
+}  // namespace __msan
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/lib/msan/msan_thread.h b/lib/msan/msan_thread.h
index bc605b89a..ed22e67ed 100644
--- a/lib/msan/msan_thread.h
+++ b/lib/msan/msan_thread.h
@@ -32,7 +32,7 @@ class MsanThread {
uptr stack_bottom() { return stack_bottom_; }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
- bool IsMainThread() { return start_routine_ == 0; }
+ bool IsMainThread() { return start_routine_ == nullptr; }
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;