summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorAlexey Samsonov <vonosmas@gmail.com>2014-12-19 20:35:53 +0000
committerAlexey Samsonov <vonosmas@gmail.com>2014-12-19 20:35:53 +0000
commita7d265546807ffc763cb634fe0c1efaa74955ed9 (patch)
tree389f65c773434317dbf23724d6eadb94f7089d42 /lib
parent262ebb726c15f7d11fe24566b6486fe4422b97a4 (diff)
[ASan] Change activation strategy.
Now ASan deactivation doesn't modify common or ASan-specific runtime flags. Flags stay constant after initialization, and "deactivation" instead stashes initialized runtime state, and deactivates the runtime. Activation then just restores the original state (possibly, overridden by some activation flags provided in system property on Android). git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@224614 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--lib/asan/asan_activation.cc48
-rw-r--r--lib/asan/asan_activation.h2
-rw-r--r--lib/asan/asan_allocator.cc21
-rw-r--r--lib/asan/asan_allocator.h2
-rw-r--r--lib/asan/asan_flags.cc11
-rw-r--r--lib/asan/asan_rtl.cc5
-rw-r--r--lib/sanitizer_common/sanitizer_allocator.h6
7 files changed, 59 insertions, 36 deletions
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index b7c00fd1c..a5fa9c8a3 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -27,23 +27,12 @@ static struct AsanDeactivatedFlags {
int malloc_context_size;
bool poison_heap;
- void CopyFrom(const Flags *f, const CommonFlags *cf) {
- allocator_options.SetFrom(f, cf);
- malloc_context_size = cf->malloc_context_size;
- poison_heap = f->poison_heap;
- }
-
void OverrideFromActivationFlags() {
Flags f;
CommonFlags cf;
// Copy the current activation flags.
- f.quarantine_size = allocator_options.quarantine_size_mb << 20;
- f.redzone = allocator_options.min_redzone;
- f.max_redzone = allocator_options.max_redzone;
- cf.allocator_may_return_null = allocator_options.may_return_null;
- f.alloc_dealloc_mismatch = allocator_options.alloc_dealloc_mismatch;
-
+ allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
f.poison_heap = poison_heap;
@@ -55,7 +44,9 @@ static struct AsanDeactivatedFlags {
ParseCommonFlagsFromString(&cf, buf);
ParseFlagsFromString(&f, buf);
- CopyFrom(&f, &cf);
+ allocator_options.SetFrom(&f, &cf);
+ malloc_context_size = cf.malloc_context_size;
+ poison_heap = f.poison_heap;
}
void Print() {
@@ -71,20 +62,25 @@ static struct AsanDeactivatedFlags {
static bool asan_is_deactivated;
-void AsanStartDeactivated() {
+void AsanDeactivate() {
+ CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
- // Save flag values.
- asan_deactivated_flags.CopyFrom(flags(), common_flags());
-
- // FIXME: Don't overwrite commandline flags. Instead, make the flags store
- // the original values calculated during flag parsing, and re-initialize
- // the necessary runtime objects.
- flags()->quarantine_size = 0;
- flags()->max_redzone = 16;
- flags()->poison_heap = false;
- common_flags()->malloc_context_size = 0;
- flags()->alloc_dealloc_mismatch = false;
- common_flags()->allocator_may_return_null = true;
+
+ // Stash runtime state.
+ GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+ asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+ asan_deactivated_flags.poison_heap = CanPoisonMemory();
+
+ // Deactivate the runtime.
+ SetCanPoisonMemory(false);
+ SetMallocContextSize(1);
+ AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+ disabled.quarantine_size_mb = 0;
+ disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
+ disabled.max_redzone = 16;
+ disabled.alloc_dealloc_mismatch = false;
+ disabled.may_return_null = true;
+ ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}
diff --git a/lib/asan/asan_activation.h b/lib/asan/asan_activation.h
index dafb840a6..d5e1ce433 100644
--- a/lib/asan/asan_activation.h
+++ b/lib/asan/asan_activation.h
@@ -16,7 +16,7 @@
#define ASAN_ACTIVATION_H
namespace __asan {
-void AsanStartDeactivated();
+void AsanDeactivate();
void AsanActivate();
} // namespace __asan
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index b5a09bdcf..e2e0fb55b 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -213,6 +213,14 @@ void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+ f->quarantine_size = (int)quarantine_size_mb << 20;
+ f->redzone = min_redzone;
+ f->max_redzone = max_redzone;
+ cf->allocator_may_return_null = may_return_null;
+ f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+}
+
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
@@ -263,6 +271,15 @@ struct Allocator {
SharedInitCode(options);
}
+ void GetOptions(AllocatorOptions *options) const {
+ options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+ options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+ options->may_return_null = allocator.MayReturnNull();
+ options->alloc_dealloc_mismatch =
+ atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ }
+
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
@@ -662,6 +679,10 @@ void ReInitializeAllocator(const AllocatorOptions &options) {
instance.ReInitialize(options);
}
+void GetAllocatorOptions(AllocatorOptions *options) {
+ instance.GetOptions(options);
+}
+
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 2072954c9..521d47b0b 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -40,10 +40,12 @@ struct AllocatorOptions {
u8 alloc_dealloc_mismatch;
void SetFrom(const Flags *f, const CommonFlags *cf);
+ void CopyTo(Flags *f, CommonFlags *cf);
};
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
index a4780063e..c0635dfae 100644
--- a/lib/asan/asan_flags.cc
+++ b/lib/asan/asan_flags.cc
@@ -247,14 +247,9 @@ void InitializeFlags(Flags *f) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", env);
}
- // If ASan starts in deactivated state, stash and clear some flags.
- // Otherwise, let activation flags override current settings.
- if (flags()->start_deactivated) {
- AsanStartDeactivated();
- } else {
- // Parse flags that may change between startup and activation.
- // On Android they come from a system property.
- // On other platforms this is no-op.
+ // Let activation flags override current settings. On Android they come
+ // from a system property. On other platforms this is no-op.
+ if (!flags()->start_deactivated) {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
ParseCommonFlagsFromString(cf, buf);
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index e1c8769a8..895ac6bf7 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -397,6 +397,11 @@ static void AsanInitInternal() {
MaybeStartBackgroudThread();
+ // Now that ASan runtime is (mostly) initialized, deactivate it if
+ // necessary, so that it can be re-activated when requested.
+ if (flags()->start_deactivated)
+ AsanDeactivate();
+
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index d3723eb1d..d749acb90 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -1300,8 +1300,12 @@ class CombinedAllocator {
return res;
}
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
+ if (MayReturnNull())
return 0;
ReportAllocatorCannotReturnNull();
}