authorkcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>2014-05-22 07:09:21 +0000
committerkcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>2014-05-22 07:09:21 +0000
commit7d752f28b590bbad13c877c2aa7f5f8de2cdff10 (patch)
treef90afdf42b3ae78508a5c6422f458a5bb0216aa2 /libsanitizer
parentca1a66e9c405dff80abce41c96371525e515bdb4 (diff)
libsanitizer merge from upstream r209283
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210743 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer')
-rw-r--r--  libsanitizer/ChangeLog | 10
-rw-r--r--  libsanitizer/MERGE | 2
-rw-r--r--  libsanitizer/asan/Makefile.am | 1
-rw-r--r--  libsanitizer/asan/Makefile.in | 15
-rw-r--r--  libsanitizer/asan/asan_activation.cc | 72
-rw-r--r--  libsanitizer/asan/asan_activation.h | 21
-rw-r--r--  libsanitizer/asan/asan_allocator.h | 42
-rw-r--r--  libsanitizer/asan/asan_allocator2.cc | 104
-rw-r--r--  libsanitizer/asan/asan_asm_instrumentation.S | 599
-rw-r--r--  libsanitizer/asan/asan_dll_thunk.cc | 158
-rw-r--r--  libsanitizer/asan/asan_fake_stack.cc | 61
-rw-r--r--  libsanitizer/asan/asan_fake_stack.h | 8
-rw-r--r--  libsanitizer/asan/asan_flags.h | 64
-rw-r--r--  libsanitizer/asan/asan_globals.cc | 13
-rw-r--r--  libsanitizer/asan/asan_intercepted_functions.h | 77
-rw-r--r--  libsanitizer/asan/asan_interceptors.cc | 170
-rw-r--r--  libsanitizer/asan/asan_interceptors.h | 70
-rw-r--r--  libsanitizer/asan/asan_interface_internal.h | 35
-rw-r--r--  libsanitizer/asan/asan_internal.h | 38
-rw-r--r--  libsanitizer/asan/asan_linux.cc | 149
-rw-r--r--  libsanitizer/asan/asan_mac.cc | 76
-rw-r--r--  libsanitizer/asan/asan_mac.h | 57
-rw-r--r--  libsanitizer/asan/asan_malloc_linux.cc | 15
-rw-r--r--  libsanitizer/asan/asan_malloc_mac.cc | 36
-rw-r--r--  libsanitizer/asan/asan_malloc_win.cc | 17
-rw-r--r--  libsanitizer/asan/asan_mapping.h | 77
-rw-r--r--  libsanitizer/asan/asan_new_delete.cc | 43
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc | 102
-rw-r--r--  libsanitizer/asan/asan_poisoning.h | 32
-rw-r--r--  libsanitizer/asan/asan_posix.cc | 75
-rw-r--r--  libsanitizer/asan/asan_report.cc | 328
-rw-r--r--  libsanitizer/asan/asan_report.h | 13
-rw-r--r--  libsanitizer/asan/asan_rtl.cc | 440
-rw-r--r--  libsanitizer/asan/asan_stack.cc | 30
-rw-r--r--  libsanitizer/asan/asan_stack.h | 75
-rw-r--r--  libsanitizer/asan/asan_thread.cc | 45
-rw-r--r--  libsanitizer/asan/asan_thread.h | 5
-rw-r--r--  libsanitizer/asan/asan_win.cc | 67
-rw-r--r--  libsanitizer/include/sanitizer/asan_interface.h | 39
-rw-r--r--  libsanitizer/include/sanitizer/common_interface_defs.h | 29
-rw-r--r--  libsanitizer/include/sanitizer/dfsan_interface.h | 11
-rw-r--r--  libsanitizer/include/sanitizer/lsan_interface.h | 31
-rw-r--r--  libsanitizer/include/sanitizer/msan_interface.h | 41
-rw-r--r--  libsanitizer/include/sanitizer/tsan_interface_atomic.h (renamed from libsanitizer/tsan/tsan_interface_atomic.h) | 163
-rw-r--r--  libsanitizer/interception/interception.h | 9
-rw-r--r--  libsanitizer/interception/interception_linux.cc | 6
-rw-r--r--  libsanitizer/interception/interception_linux.h | 20
-rw-r--r--  libsanitizer/interception/interception_type_test.cc | 12
-rw-r--r--  libsanitizer/interception/interception_win.cc | 137
-rw-r--r--  libsanitizer/lsan/lsan.cc | 12
-rw-r--r--  libsanitizer/lsan/lsan_allocator.cc | 6
-rw-r--r--  libsanitizer/lsan/lsan_common.cc | 350
-rw-r--r--  libsanitizer/lsan/lsan_common.h | 47
-rw-r--r--  libsanitizer/lsan/lsan_common_linux.cc | 25
-rw-r--r--  libsanitizer/lsan/lsan_interceptors.cc | 39
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.am | 11
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.in | 40
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_addrhashmap.h | 340
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cc | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_internal.h | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang.h | 71
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h | 95
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h | 114
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h | 85
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_bitvector.h | 349
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_bvgraph.h | 163
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cc | 38
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h | 78
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc | 1736
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc | 554
-rwxr-xr-x  libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 60
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 309
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc | 47
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage.cc | 165
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h | 410
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc | 187
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc | 427
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h | 91
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.cc | 161
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.h | 48
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_interception.h | 23
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_internal_defs.h | 45
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.h | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libignore.cc | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc | 524
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.h | 27
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc | 255
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_list.h | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc | 245
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.h | 34
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mutex.h | 84
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform.h | 63
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h | 248
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc | 420
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h | 445
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.cc | 99
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc | 62
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_printf.cc | 26
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps.h | 67
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc | 302
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc | 186
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_report_decorator.h | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc | 107
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.h | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc | 76
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 58
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.cc | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.h | 1
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h | 21
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc | 45
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 456
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc | 87
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.cc | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.h | 5
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc | 129
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h | 56
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cc | 101
-rw-r--r--  libsanitizer/tsan/Makefile.am | 50
-rw-r--r--  libsanitizer/tsan/Makefile.in | 70
-rw-r--r--  libsanitizer/tsan/tsan_clock.cc | 355
-rw-r--r--  libsanitizer/tsan/tsan_clock.h | 54
-rw-r--r--  libsanitizer/tsan/tsan_defs.h | 7
-rw-r--r--  libsanitizer/tsan/tsan_fd.cc | 6
-rw-r--r--  libsanitizer/tsan/tsan_fd.h | 4
-rw-r--r--  libsanitizer/tsan/tsan_flags.cc | 69
-rw-r--r--  libsanitizer/tsan/tsan_flags.h | 17
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc | 572
-rw-r--r--  libsanitizer/tsan/tsan_interface_ann.cc | 12
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc | 289
-rw-r--r--  libsanitizer/tsan/tsan_interface_java.cc | 6
-rw-r--r--  libsanitizer/tsan/tsan_mman.cc | 17
-rw-r--r--  libsanitizer/tsan/tsan_mutex.cc | 17
-rw-r--r--  libsanitizer/tsan/tsan_mutex.h | 5
-rw-r--r--  libsanitizer/tsan/tsan_mutexset.h | 5
-rw-r--r--  libsanitizer/tsan/tsan_platform.h | 19
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cc | 57
-rw-r--r--  libsanitizer/tsan/tsan_platform_mac.cc | 25
-rw-r--r--  libsanitizer/tsan/tsan_platform_windows.cc | 11
-rw-r--r--  libsanitizer/tsan/tsan_report.cc | 95
-rw-r--r--  libsanitizer/tsan/tsan_report.h | 9
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc | 157
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h | 64
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mutex.cc | 239
-rw-r--r--  libsanitizer/tsan/tsan_rtl_report.cc | 97
-rw-r--r--  libsanitizer/tsan/tsan_rtl_thread.cc | 52
-rw-r--r--  libsanitizer/tsan/tsan_stat.cc | 357
-rw-r--r--  libsanitizer/tsan/tsan_stat.h | 358
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.cc | 10
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.cc | 10
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.h | 2
-rw-r--r--  libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc | 191
-rw-r--r--  libsanitizer/tsan/tsan_sync.cc | 11
-rw-r--r--  libsanitizer/tsan/tsan_sync.h | 13
-rw-r--r--  libsanitizer/tsan/tsan_vector.h | 16
-rw-r--r--  libsanitizer/ubsan/ubsan_diag.cc | 21
-rw-r--r--  libsanitizer/ubsan/ubsan_value.h | 6
163 files changed, 12519 insertions, 5079 deletions
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index 319f93226900..fe06edc882f1 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,13 @@
+2014-05-22 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r209283.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
+ * sanitizer_common/Makefile.in: Regenerate.
+
2014-05-14 Yury Gribov <y.gribov@samsung.com>
PR sanitizer/61100
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 4688f0c8693f..84e88e10b77e 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-196489
+209283
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index 3f07a834c697..fdc2b4513d90 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
+ asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in
index 273eb4b26459..cae6493fc9f9 100644
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -88,12 +88,13 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
-am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
- asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
- asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
- asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
- asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
- asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
+am__objects_1 = asan_activation.lo asan_allocator2.lo \
+ asan_dll_thunk.lo asan_fake_stack.lo asan_globals.lo \
+ asan_interceptors.lo asan_linux.lo asan_mac.lo \
+ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
+ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
+ asan_thread.lo asan_win.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -272,6 +273,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
+ asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
@@ -412,6 +414,7 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc
new file mode 100644
index 000000000000..235451c8aec2
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.cc
@@ -0,0 +1,72 @@
+//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __asan {
+
+static struct AsanDeactivatedFlags {
+ int quarantine_size;
+ int max_redzone;
+ int malloc_context_size;
+ bool poison_heap;
+} asan_deactivated_flags;
+
+static bool asan_is_deactivated;
+
+void AsanStartDeactivated() {
+ VReport(1, "Deactivating ASan\n");
+ // Save flag values.
+ asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
+ asan_deactivated_flags.max_redzone = flags()->max_redzone;
+ asan_deactivated_flags.poison_heap = flags()->poison_heap;
+ asan_deactivated_flags.malloc_context_size =
+ common_flags()->malloc_context_size;
+
+ flags()->quarantine_size = 0;
+ flags()->max_redzone = 16;
+ flags()->poison_heap = false;
+ common_flags()->malloc_context_size = 0;
+
+ asan_is_deactivated = true;
+}
+
+void AsanActivate() {
+ if (!asan_is_deactivated) return;
+ VReport(1, "Activating ASan\n");
+
+ // Restore flag values.
+ // FIXME: this is not atomic, and there may be other threads alive.
+ flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
+ flags()->max_redzone = asan_deactivated_flags.max_redzone;
+ flags()->poison_heap = asan_deactivated_flags.poison_heap;
+ common_flags()->malloc_context_size =
+ asan_deactivated_flags.malloc_context_size;
+
+ ParseExtraActivationFlags();
+
+ ReInitializeAllocator();
+
+ asan_is_deactivated = false;
+ VReport(
+ 1,
+ "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
+ "%d\n",
+ flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
+ common_flags()->malloc_context_size);
+}
+
+} // namespace __asan
diff --git a/libsanitizer/asan/asan_activation.h b/libsanitizer/asan/asan_activation.h
new file mode 100644
index 000000000000..01f2d46d2228
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.h
@@ -0,0 +1,21 @@
+//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ACTIVATION_H
+#define ASAN_ACTIVATION_H
+
+namespace __asan {
+void AsanStartDeactivated();
+void AsanActivate();
+} // namespace __asan
+
+#endif // ASAN_ACTIVATION_H
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 763f4a58ef9f..174a5997d4bb 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -15,6 +15,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
@@ -29,6 +30,7 @@ static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
void InitializeAllocator();
+void ReInitializeAllocator();
class AsanChunkView {
public:
@@ -40,6 +42,7 @@ class AsanChunkView {
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
+ bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
void GetAllocStack(StackTrace *stack);
void GetFreeStack(StackTrace *stack);
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
@@ -88,9 +91,46 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
uptr size_;
};
+struct AsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const;
+ void OnUnmap(uptr p, uptr size) const;
+};
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
+ SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+#else // Fallback to SizeClassAllocator32.
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<kNumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+# endif
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
+ SizeClassMap, kRegionSizeLog,
+ ByteMap,
+ AsanMapUnmapCallback> PrimaryAllocator;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
- uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
+ AllocatorCache allocator2_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
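
The typedefs above move the ASan allocator composition (size-class primary allocator, large-mmap secondary, per-thread AllocatorCache, CombinedAllocator front end) out of asan_allocator2.cc so that AsanThreadLocalMallocStorage can hold a real AllocatorCache member instead of an opaque byte array. Every layer is parameterized by AsanMapUnmapCallback, whose OnMap/OnUnmap hooks run around the allocator's internal mmap/munmap calls to keep shadow memory and statistics in sync (the definitions appear in the asan_allocator2.cc hunk below). A minimal stand-alone sketch of that callback pattern, with invented names and no dependency on sanitizer_common:

#include <cstddef>
#include <cstdint>
#include <sys/mman.h>

// Toy region allocator parameterized by a map/unmap callback, mirroring how
// SizeClassAllocator64/32 and LargeMmapAllocator use AsanMapUnmapCallback.
template <class MapUnmapCallback>
struct ToyRegionAllocator {
  void *MapRegion(size_t size) {
    void *p = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    // ASan's callback poisons the new region's shadow and bumps mmap stats.
    MapUnmapCallback().OnMap(reinterpret_cast<uintptr_t>(p), size);
    return p;
  }
  void UnmapRegion(void *p, size_t size) {
    // ASan's callback flushes the corresponding shadow before the pages go away.
    MapUnmapCallback().OnUnmap(reinterpret_cast<uintptr_t>(p), size);
    ::munmap(p, size);
  }
};

struct NoopCallback {  // Stand-in for AsanMapUnmapCallback.
  void OnMap(uintptr_t, size_t) const {}
  void OnUnmap(uintptr_t, size_t) const {}
};

// Usage: ToyRegionAllocator<NoopCallback>().MapRegion(1 << 20);
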
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
index b9d66dc7a4ad..bbc1ff723a49 100644
--- a/libsanitizer/asan/asan_allocator2.cc
+++ b/libsanitizer/asan/asan_allocator2.cc
@@ -17,8 +17,8 @@
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
+#include "asan_stack.h"
#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
@@ -28,65 +28,30 @@
namespace __asan {
-struct AsanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {
- PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mmaps++;
- thread_stats.mmaped += size;
- }
- void OnUnmap(uptr p, uptr size) const {
- PoisonShadow(p, size, 0);
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.munmaps++;
- thread_stats.munmaped += size;
- }
-};
-
-#if SANITIZER_WORDSIZE == 64
-#if defined(__powerpc64__)
-const uptr kAllocatorSpace = 0xa0000000000ULL;
-const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-#else
-const uptr kAllocatorSpace = 0x600000000000ULL;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-#endif
-typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
-#elif SANITIZER_WORDSIZE == 32
-static const u64 kAddressSpaceSize = 1ULL << 32;
-typedef CompactSizeClassMap SizeClassMap;
-static const uptr kRegionSizeLog = 20;
-static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
-typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
- SizeClassMap, kRegionSizeLog,
- FlatByteMap<kFlatByteMapSize>,
- AsanMapUnmapCallback> PrimaryAllocator;
-#endif
-
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
- CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
- return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
+ return &ms->allocator2_cache;
}
static Allocator allocator;
@@ -132,7 +97,8 @@ static uptr ComputeRZLog(uptr user_requested_size) {
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- return Max(rz_log, RZSize2Log(flags()->redzone));
+ return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
+ RZSize2Log(flags()->max_redzone));
}
// The memory chunk allocated from the underlying allocator looks like this:
@@ -307,10 +273,14 @@ void InitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
+void ReInitializeAllocator() {
+ quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
+}
+
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocType alloc_type, bool can_fill) {
- if (!asan_inited)
- __asan_init();
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@@ -355,6 +325,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if flags()->poison_heap was disabled for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
@@ -708,8 +688,12 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
- m->AddrIsInside(addr, /*locked_version=*/true))
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
return chunk;
return 0;
}
@@ -778,7 +762,7 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __asan_get_ownership(const void *p) {
+int __asan_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
diff --git a/libsanitizer/asan/asan_asm_instrumentation.S b/libsanitizer/asan/asan_asm_instrumentation.S
new file mode 100644
index 000000000000..36a9d0b5e975
--- /dev/null
+++ b/libsanitizer/asan/asan_asm_instrumentation.S
@@ -0,0 +1,599 @@
+// This file was generated by gen_asm_instrumentation.sh. Please, do not edit
+.section .text
+#if defined(__x86_64__) || defined(__i386__)
+.globl __asan_report_store1
+.globl __asan_report_load1
+.globl __asan_report_store2
+.globl __asan_report_load2
+.globl __asan_report_store4
+.globl __asan_report_load4
+.globl __asan_report_store8
+.globl __asan_report_load8
+.globl __asan_report_store16
+.globl __asan_report_load16
+#endif // defined(__x86_64__) || defined(__i386__)
+#if defined(__i386__)
+// Sanitize 1-byte store. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_store1
+.type __sanitizer_sanitize_store1, @function
+__sanitizer_sanitize_store1:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_store1_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_store1_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_store1@PLT
+.sanitize_store1_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 1-byte load. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_load1
+.type __sanitizer_sanitize_load1, @function
+__sanitizer_sanitize_load1:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_load1_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_load1_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_load1@PLT
+.sanitize_load1_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 2-byte store. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_store2
+.type __sanitizer_sanitize_store2, @function
+__sanitizer_sanitize_store2:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_store2_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ incl %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_store2_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_store2@PLT
+.sanitize_store2_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 2-byte load. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_load2
+.type __sanitizer_sanitize_load2, @function
+__sanitizer_sanitize_load2:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_load2_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ incl %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_load2_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_load2@PLT
+.sanitize_load2_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 4-byte store. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_store4
+.type __sanitizer_sanitize_store4, @function
+__sanitizer_sanitize_store4:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_store4_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ addl $0x3, %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_store4_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_store4@PLT
+.sanitize_store4_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 4-byte load. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_load4
+.type __sanitizer_sanitize_load4, @function
+__sanitizer_sanitize_load4:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ movb 0x20000000(%ecx), %cl
+ testb %cl, %cl
+ je .sanitize_load4_done
+ movl %eax, %edx
+ andl $0x7, %edx
+ addl $0x3, %edx
+ movsbl %cl, %ecx
+ cmpl %ecx, %edx
+ jl .sanitize_load4_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_load4@PLT
+.sanitize_load4_done:
+ popfl
+ popl %edx
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 8-byte store. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_store8
+.type __sanitizer_sanitize_store8, @function
+__sanitizer_sanitize_store8:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ cmpb $0x0, 0x20000000(%ecx)
+ je .sanitize_store8_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_store8@PLT
+.sanitize_store8_done:
+ popfl
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 8-byte load. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_load8
+.type __sanitizer_sanitize_load8, @function
+__sanitizer_sanitize_load8:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ cmpb $0x0, 0x20000000(%ecx)
+ je .sanitize_load8_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_load8@PLT
+.sanitize_load8_done:
+ popfl
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 16-byte store. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_store16
+.type __sanitizer_sanitize_store16, @function
+__sanitizer_sanitize_store16:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ cmpw $0x0, 0x20000000(%ecx)
+ je .sanitize_store16_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_store16@PLT
+.sanitize_store16_done:
+ popfl
+ popl %ecx
+ popl %eax
+ leave
+ ret
+// Sanitize 16-byte load. Takes one 4-byte address as an argument on
+// stack, nothing is returned.
+.globl __sanitizer_sanitize_load16
+.type __sanitizer_sanitize_load16, @function
+__sanitizer_sanitize_load16:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %eax
+ pushl %ecx
+ pushfl
+ movl 8(%ebp), %eax
+ movl %eax, %ecx
+ shrl $0x3, %ecx
+ cmpw $0x0, 0x20000000(%ecx)
+ je .sanitize_load16_done
+ pushl %eax
+ cld
+ emms
+ call __asan_report_load16@PLT
+.sanitize_load16_done:
+ popfl
+ popl %ecx
+ popl %eax
+ leave
+ ret
+#endif // defined(__i386__)
+#if defined(__x86_64__)
+// Sanitize 1-byte store. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_store1
+.type __sanitizer_sanitize_store1, @function
+__sanitizer_sanitize_store1:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_store1_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_store1_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_store1@PLT
+.sanitize_store1_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 1-byte load. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_load1
+.type __sanitizer_sanitize_load1, @function
+__sanitizer_sanitize_load1:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_load1_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_load1_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_load1@PLT
+.sanitize_load1_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 2-byte store. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_store2
+.type __sanitizer_sanitize_store2, @function
+__sanitizer_sanitize_store2:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_store2_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ incl %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_store2_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_store2@PLT
+.sanitize_store2_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 2-byte load. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_load2
+.type __sanitizer_sanitize_load2, @function
+__sanitizer_sanitize_load2:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_load2_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ incl %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_load2_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_load2@PLT
+.sanitize_load2_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 4-byte store. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_store4
+.type __sanitizer_sanitize_store4, @function
+__sanitizer_sanitize_store4:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_store4_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ addl $0x3, %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_store4_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_store4@PLT
+.sanitize_store4_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 4-byte load. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_load4
+.type __sanitizer_sanitize_load4, @function
+__sanitizer_sanitize_load4:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushq %rcx
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ movb 0x7fff8000(%rax), %al
+ test %al, %al
+ je .sanitize_load4_done
+ movl %edi, %ecx
+ andl $0x7, %ecx
+ addl $0x3, %ecx
+ movsbl %al, %eax
+ cmpl %eax, %ecx
+ jl .sanitize_load4_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_load4@PLT
+.sanitize_load4_done:
+ popfq
+ popq %rcx
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 8-byte store. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_store8
+.type __sanitizer_sanitize_store8, @function
+__sanitizer_sanitize_store8:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ cmpb $0x0, 0x7fff8000(%rax)
+ je .sanitize_store8_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_store8@PLT
+.sanitize_store8_done:
+ popfq
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 8-byte load. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_load8
+.type __sanitizer_sanitize_load8, @function
+__sanitizer_sanitize_load8:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ cmpb $0x0, 0x7fff8000(%rax)
+ je .sanitize_load8_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_load8@PLT
+.sanitize_load8_done:
+ popfq
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 16-byte store. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_store16
+.type __sanitizer_sanitize_store16, @function
+__sanitizer_sanitize_store16:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ cmpw $0x0, 0x7fff8000(%rax)
+ je .sanitize_store16_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_store16@PLT
+.sanitize_store16_done:
+ popfq
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+// Sanitize 16-byte load. Takes one 8-byte address as an argument in %rdi,
+// nothing is returned.
+.globl __sanitizer_sanitize_load16
+.type __sanitizer_sanitize_load16, @function
+__sanitizer_sanitize_load16:
+ leaq -128(%rsp), %rsp
+ pushq %rax
+ pushfq
+ movq %rdi, %rax
+ shrq $0x3, %rax
+ cmpw $0x0, 0x7fff8000(%rax)
+ je .sanitize_load16_done
+ subq $8, %rsp
+ andq $-16, %rsp
+ cld
+ emms
+ call __asan_report_load16@PLT
+.sanitize_load16_done:
+ popfq
+ popq %rax
+ leaq 128(%rsp), %rsp
+ ret
+#endif // defined(__x86_64__)
+/* We do not need executable stack. */
+#if defined(__arm__)
+ .section .note.GNU-stack,"",%progbits
+#else
+ .section .note.GNU-stack,"",@progbits
+#endif // defined(__arm__)
+#endif // __linux__
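
Each __sanitizer_sanitize_load*/store* routine above performs the same shadow test before calling an __asan_report_* function. A C++ sketch of that test for 1-, 2- and 4-byte accesses (not part of the commit; the shadow offsets 0x20000000 and 0x7fff8000 are the constants hard-coded in the assembly above):

#include <cstdint>

// Shadow offset used by the assembly: 0x20000000 on i386, 0x7fff8000 on x86_64.
static const uintptr_t kShadowOffset =
    sizeof(void *) == 8 ? 0x7fff8000u : 0x20000000u;

// Returns true if an access of `size` bytes (1, 2 or 4) at `addr` touches
// poisoned memory and must be reported. A zero shadow byte means the whole
// 8-byte granule is addressable; a value k in 1..7 means only its first k
// bytes are.
static bool ShadowCheckSmallAccess(uintptr_t addr, unsigned size) {
  int8_t shadow = *reinterpret_cast<int8_t *>((addr >> 3) + kShadowOffset);
  if (shadow == 0) return false;
  return static_cast<int>((addr & 7) + size - 1) >= shadow;
}

// For 8- and 16-byte accesses the assembly reports on any nonzero shadow
// byte (cmpb) or shadow word (cmpw), since such an access always covers at
// least one full granule.
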
diff --git a/libsanitizer/asan/asan_dll_thunk.cc b/libsanitizer/asan/asan_dll_thunk.cc
index 19c31f0def8f..5bed39ac0663 100644
--- a/libsanitizer/asan/asan_dll_thunk.cc
+++ b/libsanitizer/asan/asan_dll_thunk.cc
@@ -18,6 +18,7 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
+#include "sanitizer_common/sanitizer_interception.h"
// ----------------- Helper functions and macros --------------------- {{{1
extern "C" {
@@ -113,7 +114,50 @@ static void *getRealProcAddressOrDie(const char *name) {
}
// }}}
+// --------- Interface interception helper functions and macros ----------- {{{1
+// We need to intercept the ASan interface exported by the DLL thunk and forward
+// all the functions to the runtime in the main module.
+// However, we don't want to keep two lists of interface functions.
+// To avoid that, the list of interface functions should be defined using the
+// INTERFACE_FUNCTION macro. Then, all the interface can be intercepted at once
+// by calling INTERCEPT_ASAN_INTERFACE().
+
+// Use macro+template magic to automatically generate the list of interface
+// functions. Each interface function at line LINE defines a template class
+// with a static InterfaceInteceptor<LINE>::Execute() method intercepting the
+// function. The default implementation of InterfaceInteceptor<LINE> is to call
+// the Execute() method corresponding to the previous line.
+template<int LINE>
+struct InterfaceInteceptor {
+ static void Execute() { InterfaceInteceptor<LINE-1>::Execute(); }
+};
+
+// There shouldn't be any interface function with negative line number.
+template<>
+struct InterfaceInteceptor<0> {
+ static void Execute() {}
+};
+
+#define INTERFACE_FUNCTION(name) \
+ extern "C" void name() { __debugbreak(); } \
+ template<> struct InterfaceInteceptor<__LINE__> { \
+ static void Execute() { \
+ void *wrapper = getRealProcAddressOrDie(#name); \
+ if (!__interception::OverrideFunction((uptr)name, (uptr)wrapper, 0)) \
+ abort(); \
+ InterfaceInteceptor<__LINE__-1>::Execute(); \
+ } \
+ };
+
+// INTERCEPT_ASAN_INTERFACE must be used after the last INTERFACE_FUNCTION.
+#define INTERCEPT_ASAN_INTERFACE InterfaceInteceptor<__LINE__>::Execute
+
+static void InterceptASanInterface();
+// }}}
+
// ----------------- ASan own interface functions --------------------
+// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
+// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
extern "C" {
@@ -123,54 +167,75 @@ extern "C" {
// __asan_option_detect_stack_use_after_return afterwards.
void __asan_init_v3() {
typedef void (*fntype)();
- static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
+ static fntype fn = 0;
+ if (fn) return;
+
+ fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
+
+ InterceptASanInterface();
}
}
-WRAP_V_V(__asan_handle_no_return)
-
-WRAP_V_W(__asan_report_store1)
-WRAP_V_W(__asan_report_store2)
-WRAP_V_W(__asan_report_store4)
-WRAP_V_W(__asan_report_store8)
-WRAP_V_W(__asan_report_store16)
-WRAP_V_WW(__asan_report_store_n)
-
-WRAP_V_W(__asan_report_load1)
-WRAP_V_W(__asan_report_load2)
-WRAP_V_W(__asan_report_load4)
-WRAP_V_W(__asan_report_load8)
-WRAP_V_W(__asan_report_load16)
-WRAP_V_WW(__asan_report_load_n)
-
-WRAP_V_WW(__asan_register_globals)
-WRAP_V_WW(__asan_unregister_globals)
-
-WRAP_W_WW(__asan_stack_malloc_0)
-WRAP_W_WW(__asan_stack_malloc_1)
-WRAP_W_WW(__asan_stack_malloc_2)
-WRAP_W_WW(__asan_stack_malloc_3)
-WRAP_W_WW(__asan_stack_malloc_4)
-WRAP_W_WW(__asan_stack_malloc_5)
-WRAP_W_WW(__asan_stack_malloc_6)
-WRAP_W_WW(__asan_stack_malloc_7)
-WRAP_W_WW(__asan_stack_malloc_8)
-WRAP_W_WW(__asan_stack_malloc_9)
-WRAP_W_WW(__asan_stack_malloc_10)
-
-WRAP_V_WWW(__asan_stack_free_0)
-WRAP_V_WWW(__asan_stack_free_1)
-WRAP_V_WWW(__asan_stack_free_2)
-WRAP_V_WWW(__asan_stack_free_4)
-WRAP_V_WWW(__asan_stack_free_5)
-WRAP_V_WWW(__asan_stack_free_6)
-WRAP_V_WWW(__asan_stack_free_7)
-WRAP_V_WWW(__asan_stack_free_8)
-WRAP_V_WWW(__asan_stack_free_9)
-WRAP_V_WWW(__asan_stack_free_10)
+INTERFACE_FUNCTION(__asan_handle_no_return)
+
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+
+INTERFACE_FUNCTION(__asan_memcpy);
+INTERFACE_FUNCTION(__asan_memset);
+INTERFACE_FUNCTION(__asan_memmove);
+
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
@@ -190,7 +255,16 @@ WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_W(_msize)
+WRAP_W_W(_expand)
+WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+void InterceptASanInterface() {
+ INTERCEPT_ASAN_INTERFACE();
+}
+
#endif // ASAN_DLL_THUNK
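
The INTERFACE_FUNCTION/INTERCEPT_ASAN_INTERFACE machinery above avoids keeping a second list of interface functions: each INTERFACE_FUNCTION specializes InterfaceInteceptor<__LINE__>, and Execute() chains down through the line numbers until the terminator at 0. A self-contained toy using the same trick (names invented for illustration; it only prints instead of overriding functions):

#include <cstdio>

template <int Line>
struct Registrar {
  // Default: nothing was registered on this line, fall through to the
  // previous one.
  static void Execute() { Registrar<Line - 1>::Execute(); }
};

template <>
struct Registrar<0> {  // Recursion terminator.
  static void Execute() {}
};

// Each use specializes Registrar for the current source line: do this line's
// work, then chain to everything registered on earlier lines.
#define REGISTER_FUNCTION(name)                 \
  template <>                                   \
  struct Registrar<__LINE__> {                  \
    static void Execute() {                     \
      std::puts("registering " #name);          \
      Registrar<__LINE__ - 1>::Execute();       \
    }                                           \
  };

// Must appear after the last REGISTER_FUNCTION, just as
// INTERCEPT_ASAN_INTERFACE must follow the last INTERFACE_FUNCTION.
#define RUN_ALL_REGISTRATIONS Registrar<__LINE__>::Execute

REGISTER_FUNCTION(alpha)
REGISTER_FUNCTION(beta)

int main() { RUN_ALL_REGISTRATIONS(); }
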
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
index cf4122472efe..cfe96a0882f5 100644
--- a/libsanitizer/asan/asan_fake_stack.cc
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -40,21 +40,32 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
+ uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
- MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
+ flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
+ : MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
- if (common_flags()->verbosity) {
- u8 *p = reinterpret_cast<u8 *>(res);
- Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
- GetCurrentTidOrInvalid(), p,
- p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
- }
+ u8 *p = reinterpret_cast<u8 *>(res);
+ VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
+ "mmapped %zdK, noreserve=%d \n",
+ GetCurrentTidOrInvalid(), p,
+ p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
+ size >> 10, flags()->uar_noreserve);
return res;
}
-void FakeStack::Destroy() {
+void FakeStack::Destroy(int tid) {
PoisonAll(0);
- UnmapOrDie(this, RequiredSize(stack_size_log_));
+ if (common_flags()->verbosity >= 2) {
+ InternalScopedString str(kNumberOfSizeClasses * 50);
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
+ str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
+ Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
+ }
+ uptr size = RequiredSize(stack_size_log_);
+ FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
+ UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
@@ -91,7 +102,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
return 0; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
@@ -101,7 +112,10 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
- return base + pos * BytesInSizeClass(class_id);
+ uptr res = base + pos * BytesInSizeClass(class_id);
+ *frame_end = res + BytesInSizeClass(class_id);
+ *frame_beg = res + sizeof(FakeFrame);
+ return res;
}
void FakeStack::HandleNoReturn() {
@@ -195,14 +209,15 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
- return __asan::OnMalloc(class_id, size, real_stack); \
+ return OnMalloc(class_id, size, real_stack); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
- __asan::OnFree(ptr, class_id, size, real_stack); \
+ OnFree(ptr, class_id, size, real_stack); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@@ -216,3 +231,23 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs) return 0;
+ uptr frame_beg, frame_end;
+ FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
+ if (!frame) return 0;
+ if (frame->magic != kCurrentStackFrameMagic)
+ return 0;
+ if (beg) *beg = reinterpret_cast<void*>(frame_beg);
+ if (end) *end = reinterpret_cast<void*>(frame_end);
+ return reinterpret_cast<void*>(frame->real_stack);
+}
+} // extern "C"
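
The two exported functions added above let external tools map an address on ASan's fake (use-after-return) stack back to the bounds of the fake frame that holds it. A hedged usage sketch; the helper name is invented, the prototypes are the ones defined in this hunk:

extern "C" void *__asan_get_current_fake_stack();
extern "C" void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr,
                                              void **beg, void **end);

// If `addr` lies in a live frame of the current thread's fake stack, store
// the frame bounds in *frame_beg/*frame_end and return true.
static bool DescribeFakeStackAddr(void *addr, void **frame_beg,
                                  void **frame_end) {
  void *fs = __asan_get_current_fake_stack();
  if (!fs) return false;  // This thread has no fake stack.
  return __asan_addr_is_in_fake_stack(fs, addr, frame_beg, frame_end) !=
         nullptr;
}
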
diff --git a/libsanitizer/asan/asan_fake_stack.h b/libsanitizer/asan/asan_fake_stack.h
index 5196025681cf..550a86e29725 100644
--- a/libsanitizer/asan/asan_fake_stack.h
+++ b/libsanitizer/asan/asan_fake_stack.h
@@ -63,7 +63,7 @@ class FakeStack {
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
- void Destroy();
+ void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
@@ -127,7 +127,11 @@ class FakeStack {
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
- uptr AddrIsInFakeStack(uptr addr);
+ uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+ USED uptr AddrIsInFakeStack(uptr addr) {
+ uptr t1, t2;
+ return AddrIsInFakeStack(addr, &t1, &t2);
+ }
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h
index 62b5d3215d34..42463a69b993 100644
--- a/libsanitizer/asan/asan_flags.h
+++ b/libsanitizer/asan/asan_flags.h
@@ -26,88 +26,42 @@
namespace __asan {
struct Flags {
- // Size (in bytes) of quarantine used to detect use-after-free errors.
- // Lower value may reduce memory usage but increase the chance of
- // false negatives.
+ // Flag descriptions are in asan_rtl.cc.
int quarantine_size;
- // Size (in bytes) of redzones around heap objects.
- // Requirement: redzone >= 32, is a power of two.
int redzone;
- // If set, prints some debugging information and does additional checks.
+ int max_redzone;
bool debug;
- // Controls the way to handle globals (0 - don't detect buffer overflow
- // on globals, 1 - detect buffer overflow, 2 - print data about registered
- // globals).
int report_globals;
- // If set, attempts to catch initialization order issues.
bool check_initialization_order;
- // If set, uses custom wrappers and replacements for libc string functions
- // to find more errors.
bool replace_str;
- // If set, uses custom wrappers for memset/memcpy/memmove intinsics.
bool replace_intrin;
- // Used on Mac only.
bool mac_ignore_invalid_free;
- // Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
- // The minimal fake stack size log.
- int uar_stack_size_log;
- // ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
- // that will be filled with malloc_fill_byte on malloc.
+ int min_uar_stack_size_log;
+ int max_uar_stack_size_log;
+ bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
- // Override exit status if something was reported.
int exitcode;
- // If set, user may manually mark memory regions as poisoned or unpoisoned.
bool allow_user_poisoning;
- // Number of seconds to sleep between printing an error report and
- // terminating application. Useful for debug purposes (when one needs
- // to attach gdb, for example).
int sleep_before_dying;
- // If set, registers ASan custom segv handler.
- bool handle_segv;
- // If set, allows user register segv handler even if ASan registers one.
- bool allow_user_segv_handler;
- // If set, uses alternate stack for signal handling.
- bool use_sigaltstack;
- // Allow the users to work around the bug in Nvidia drivers prior to 295.*.
bool check_malloc_usable_size;
- // If set, explicitly unmaps (huge) shadow at exit.
bool unmap_shadow_on_exit;
- // If set, calls abort() instead of _exit() after printing an error report.
bool abort_on_error;
- // Print various statistics after printing an error message or if atexit=1.
bool print_stats;
- // Print the legend for the shadow bytes.
bool print_legend;
- // If set, prints ASan exit stats even after program terminates successfully.
bool atexit;
- // If set, coverage information will be dumped at shutdown time if the
- // appropriate instrumentation was enabled.
- bool coverage;
- // By default, disable core dumper on 64-bit - it makes little sense
- // to dump 16T+ core.
bool disable_core;
- // Allow the tool to re-exec the program. This may interfere badly with the
- // debugger.
bool allow_reexec;
- // If set, prints not only thread creation stacks for threads in error report,
- // but also thread creation stacks for threads that created those threads,
- // etc. up to main thread.
bool print_full_thread_history;
- // Poison (or not) the heap memory on [de]allocation. Zero value is useful
- // for benchmarking the allocator or instrumentator.
bool poison_heap;
- // If true, poison partially addressable 8-byte aligned words (default=true).
- // This flag affects heap and global buffers, but not stack buffers.
bool poison_partial;
- // Report errors on malloc/delete, new/free, new/delete[], etc.
bool alloc_dealloc_mismatch;
- // If true, assume that memcmp(p1, p2, n) always reads n bytes before
- // comparing p1 and p2.
bool strict_memcmp;
- // If true, assume that dynamic initializers can never access globals from
- // other modules, even if the latter are already initialized.
bool strict_init_order;
+ bool start_deactivated;
+ int detect_invalid_pointer_pairs;
+ bool detect_container_overflow;
+ int detect_odr_violation;
};
extern Flags asan_flags_dont_use_directly;
diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc
index e97850a854aa..132a564f4fee 100644
--- a/libsanitizer/asan/asan_globals.cc
+++ b/libsanitizer/asan/asan_globals.cc
@@ -90,6 +90,19 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+ if (flags()->detect_odr_violation) {
+ // Try detecting ODR (One Definition Rule) violation, i.e. the situation
+ // where two globals with the same name are defined in different modules.
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger
+ // the entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
+ ReportODRViolation(g, l->g);
+ }
+ }
+ }
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
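
The new detect_odr_violation logic above fires when a freshly registered global lands in memory that is already poisoned and a previously registered global starts at the same address; mode 1 reports only when the two sizes differ, mode 2 and above reports any such duplicate. A simplified stand-alone sketch of that comparison (keyed by start address for brevity, rather than re-checking shadow poisoning and walking ListOfGlobals as the real RegisterGlobal does):

#include <cstdint>
#include <cstdio>
#include <map>

struct GlobalDesc {
  uintptr_t beg;    // Start address of the global.
  uintptr_t size;   // Size without redzone.
  const char *name;
};

// detect_odr_violation: 0 = off, 1 = report only when sizes differ,
// >= 2 = report any duplicate registration of the same start address.
static std::map<uintptr_t, GlobalDesc> registered_globals;

static void RegisterGlobalSketch(const GlobalDesc &g,
                                 int detect_odr_violation) {
  auto it = registered_globals.find(g.beg);
  if (it != registered_globals.end() && detect_odr_violation != 0 &&
      (detect_odr_violation >= 2 || it->second.size != g.size))
    std::printf("ODR violation: '%s' and '%s' both start at 0x%llx\n",
                it->second.name, g.name,
                static_cast<unsigned long long>(g.beg));
  registered_globals.emplace(g.beg, g);
}
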
diff --git a/libsanitizer/asan/asan_intercepted_functions.h b/libsanitizer/asan/asan_intercepted_functions.h
deleted file mode 100644
index 19b53363a5b7..000000000000
--- a/libsanitizer/asan/asan_intercepted_functions.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- asan_intercepted_functions.h ----------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// ASan-private header containing prototypes for wrapper functions and wrappers
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
-#define ASAN_INTERCEPTED_FUNCTIONS_H
-
-#include "sanitizer_common/sanitizer_platform_interceptors.h"
-
-// Use macro to describe if specific function should be
-// intercepted on a given platform.
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
-# define ASAN_INTERCEPT__LONGJMP 1
-# define ASAN_INTERCEPT_STRDUP 1
-# define ASAN_INTERCEPT_INDEX 1
-# define ASAN_INTERCEPT_PTHREAD_CREATE 1
-# define ASAN_INTERCEPT_MLOCKX 1
-#else
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
-# define ASAN_INTERCEPT__LONGJMP 0
-# define ASAN_INTERCEPT_STRDUP 0
-# define ASAN_INTERCEPT_INDEX 0
-# define ASAN_INTERCEPT_PTHREAD_CREATE 0
-# define ASAN_INTERCEPT_MLOCKX 0
-#endif
-
-#if SANITIZER_LINUX
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
-#else
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
-#endif
-
-#if !SANITIZER_MAC
-# define ASAN_INTERCEPT_STRNLEN 1
-#else
-# define ASAN_INTERCEPT_STRNLEN 0
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# define ASAN_INTERCEPT_SWAPCONTEXT 1
-#else
-# define ASAN_INTERCEPT_SWAPCONTEXT 0
-#endif
-
-#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
-#else
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGLONGJMP 1
-#else
-# define ASAN_INTERCEPT_SIGLONGJMP 0
-#endif
-
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_THROW 1
-#else
-# define ASAN_INTERCEPT___CXA_THROW 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_ATEXIT 1
-#else
-# define ASAN_INTERCEPT___CXA_ATEXIT 0
-#endif
-
-#endif // ASAN_INTERCEPTED_FUNCTIONS_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index decbfea5f755..13deab5766ea 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -12,14 +12,12 @@
#include "asan_interceptors.h"
#include "asan_allocator.h"
-#include "asan_intercepted_functions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
-#include "interception/interception.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@@ -43,6 +41,10 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
+ if (__offset > __offset + __size) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
GET_CURRENT_PC_BP_SP; \
@@ -70,13 +72,6 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} \
} while (0)
-#define ENSURE_ASAN_INITED() do { \
- CHECK(!asan_init_is_running); \
- if (!asan_inited) { \
- __asan_init(); \
- } \
-} while (0)
-
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
if (REAL(strnlen) != 0) {
@@ -106,11 +101,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
- common_flags()->verbosity > 0) \
- Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
+ VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
@@ -118,19 +112,18 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#endif // SANITIZER_MAC
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- do { \
- if (asan_init_is_running) return REAL(func)(__VA_ARGS__); \
- ctx = 0; \
- (void) ctx; \
- if (SANITIZER_MAC && !asan_inited) return REAL(func)(__VA_ARGS__); \
- ENSURE_ASAN_INITED(); \
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ do { \
+ if (asan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ ctx = 0; \
+ (void) ctx; \
+ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
@@ -194,20 +187,41 @@ INTERCEPTOR(int, pthread_create, void *thread,
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+
+#if SANITIZER_ANDROID
+INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
+ return REAL(bsd_signal)(signum, handler);
+ }
+ return 0;
+}
+#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
}
+#endif
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
}
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction)(signum,
+ (struct sigaction *)act, (struct sigaction *)oldact);
+}
+} // namespace __sanitizer
+
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
@@ -284,10 +298,9 @@ static void MlockIsUnsupported() {
static bool printed = false;
if (printed) return;
printed = true;
- if (common_flags()->verbosity > 0) {
- Printf("INFO: AddressSanitizer ignores "
- "mlock/mlockall/munlock/munlockall\n");
- }
+ VPrintf(1,
+ "INFO: AddressSanitizer ignores "
+ "mlock/mlockall/munlock/munlockall\n");
}
INTERCEPTOR(int, mlock, const void *addr, uptr len) {
@@ -315,7 +328,7 @@ static inline int CharCmp(unsigned char c1, unsigned char c2) {
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
- if (!asan_inited) return internal_memcmp(a1, a2, size);
+ if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
@@ -342,24 +355,8 @@ INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
return REAL(memcmp(a1, a2, size));
}
-#define MEMMOVE_BODY { \
- if (!asan_inited) return internal_memmove(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memmove)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(from, size); \
- ASAN_WRITE_RANGE(to, size); \
- } \
- return internal_memmove(to, from, size); \
-}
-
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) MEMMOVE_BODY
-
-INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
-#if !SANITIZER_MAC
- if (!asan_inited) return internal_memcpy(to, from, size);
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
// memcpy is called during __asan_init() from the internals
// of printf(...).
if (asan_init_is_running) {
@@ -375,23 +372,11 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
- // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8, so
- // calling REAL(memcpy) here leads to infinite recursion.
- // See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
- return internal_memcpy(to, from, size);
-#else
- // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
- // with WRAP(memcpy). As a result, false positives are reported for memmove()
- // calls. If we just disable error reporting with
- // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
- // internal_memcpy(), which may lead to crashes, see
- // http://llvm.org/bugs/show_bug.cgi?id=16362.
- MEMMOVE_BODY
-#endif // !SANITIZER_MAC
+ return REAL(memcpy)(to, from, size);
}
-INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- if (!asan_inited) return internal_memset(block, c, size);
+void *__asan_memset(void *block, int c, uptr size) {
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
// memset is called inside Printf.
if (asan_init_is_running) {
return REAL(memset)(block, c, size);
@@ -403,8 +388,41 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
return REAL(memset)(block, c, size);
}
+void *__asan_memmove(void *to, const void *from, uptr size) {
+ if (UNLIKELY(!asan_inited))
+ return internal_memmove(to, from, size);
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_intrin) {
+ ASAN_READ_RANGE(from, size);
+ ASAN_WRITE_RANGE(to, size);
+ }
+ return internal_memmove(to, from, size);
+}
+
+INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
+ return __asan_memmove(to, from, size);
+}
+
+INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
+#if !SANITIZER_MAC
+ return __asan_memcpy(to, from, size);
+#else
+ // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
+ // with WRAP(memcpy). As a result, false positives are reported for memmove()
+ // calls. If we just disable error reporting with
+ // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
+ // internal_memcpy(), which may lead to crashes, see
+ // http://llvm.org/bugs/show_bug.cgi?id=16362.
+ return __asan_memmove(to, from, size);
+#endif // !SANITIZER_MAC
+}
+
+INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
+ return __asan_memset(block, c, size);
+}
+
INTERCEPTOR(char*, strchr, const char *str, int c) {
- if (!asan_inited) return internal_strchr(str, c);
+ if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
@@ -473,7 +491,7 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if SANITIZER_MAC
- if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT
+ if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
@@ -492,7 +510,7 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
- if (!asan_inited) return internal_strdup(s);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
@@ -506,7 +524,7 @@ INTERCEPTOR(char*, strdup, const char *s) {
#endif
INTERCEPTOR(uptr, strlen, const char *s) {
- if (!asan_inited) return internal_strlen(s);
+ if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
@@ -588,7 +606,7 @@ INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
INTERCEPTOR(int, atoi, const char *nptr) {
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atoi)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -607,7 +625,7 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atol)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -664,7 +682,7 @@ static void AtCxaAtexit(void *unused) {
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
- if (!asan_inited) return REAL(__cxa_atexit)(func, arg, dso_handle);
+ if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
#endif
ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@@ -705,7 +723,7 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
was_called_once = true;
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcmp);
@@ -753,8 +771,12 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
ASAN_INTERCEPT_FUNC(sigaction);
+#if SANITIZER_ANDROID
+ ASAN_INTERCEPT_FUNC(bsd_signal);
+#else
ASAN_INTERCEPT_FUNC(signal);
#endif
+#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
#endif
@@ -785,9 +807,7 @@ void InitializeAsanInterceptors() {
InitializeWindowsInterceptors();
#endif
- if (common_flags()->verbosity > 0) {
- Report("AddressSanitizer: libc interceptors initialized\n");
- }
+ VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
} // namespace __asan
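
Among the changes above, the extra test added to the range-checking macro guards against the case where offset + size wraps around the unsigned address range, which would otherwise slip past the poisoning check before ReportStringFunctionSizeOverflow can fire. The wrap-around test is plain unsigned arithmetic; a minimal self-contained sketch:

#include <cstdio>

typedef unsigned long long uptr;   // stand-in for the sanitizer's uptr

// beg + size overflows exactly when the unsigned sum wraps below beg,
// which is what the "__offset > __offset + __size" test detects.
static bool RangeWraps(uptr beg, uptr size) {
  return beg > beg + size;
}

int main() {
  std::printf("%d\n", RangeWraps(0x1000, 0x10));       // 0: ordinary range
  std::printf("%d\n", RangeWraps(0x1000, ~0ULL - 8));  // 1: size wraps past the end
}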
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index cae4c7f01258..af7cdc8a916c 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -13,7 +13,68 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
-#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// Use a macro to describe whether a specific function should be
+// intercepted on a given platform.
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
+# define ASAN_INTERCEPT__LONGJMP 1
+# define ASAN_INTERCEPT_STRDUP 1
+# define ASAN_INTERCEPT_INDEX 1
+# define ASAN_INTERCEPT_PTHREAD_CREATE 1
+# define ASAN_INTERCEPT_MLOCKX 1
+#else
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
+# define ASAN_INTERCEPT__LONGJMP 0
+# define ASAN_INTERCEPT_STRDUP 0
+# define ASAN_INTERCEPT_INDEX 0
+# define ASAN_INTERCEPT_PTHREAD_CREATE 0
+# define ASAN_INTERCEPT_MLOCKX 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+#else
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+#endif
+
+#if !SANITIZER_MAC
+# define ASAN_INTERCEPT_STRNLEN 1
+#else
+# define ASAN_INTERCEPT_STRNLEN 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT_SWAPCONTEXT 1
+#else
+# define ASAN_INTERCEPT_SWAPCONTEXT 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
+#else
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGLONGJMP 1
+#else
+# define ASAN_INTERCEPT_SIGLONGJMP 0
+#endif
+
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT___CXA_THROW 1
+#else
+# define ASAN_INTERCEPT___CXA_THROW 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT___CXA_ATEXIT 1
+#else
+# define ASAN_INTERCEPT___CXA_ATEXIT 0
+#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
@@ -31,6 +92,13 @@ namespace __asan {
void InitializeAsanInterceptors();
+#define ENSURE_ASAN_INITED() do { \
+ CHECK(!asan_init_is_running); \
+ if (UNLIKELY(!asan_inited)) { \
+ AsanInitFromRtl(); \
+ } \
+} while (0)
+
} // namespace __asan
#endif // ASAN_INTERCEPTORS_H
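
ENSURE_ASAN_INITED() moves into this header and now calls AsanInitFromRtl() rather than the public __asan_init(). The pattern it implements is a lazy-initialization guard: never re-enter while initialization is in progress, and initialize on first use otherwise. A generic sketch of that pattern, using hypothetical stand-in functions rather than the real ASan entry points:

#include <cassert>
#include <cstdio>

static bool inited = false;
static bool init_is_running = false;

static void InitFromRtl() {      // stands in for AsanInitFromRtl()
  init_is_running = true;
  // ... real initialization work would go here ...
  inited = true;
  init_is_running = false;
}

static void EnsureInited() {
  assert(!init_is_running);      // an interceptor must not re-enter init
  if (!inited)
    InitFromRtl();
}

int main() {
  EnsureInited();
  std::printf("inited: %d\n", inited);
}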
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index 7deed9f46073..1940477f2476 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -20,7 +20,7 @@ using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
- // Everytime the asan ABI changes we also change the version number in this
+ // Every time the asan ABI changes we also change the version number in this
// name. Objects build with incompatible asan ABI version
// will not link with run-time.
// Changes between ABI versions:
@@ -75,7 +75,7 @@ extern "C" {
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
- bool __asan_address_is_poisoned(void const volatile *addr);
+ int __asan_address_is_poisoned(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
@@ -85,7 +85,7 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size);
+ uptr addr, int is_write, uptr access_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
@@ -97,14 +97,10 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_estimated_allocated_size(uptr size);
- SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
+ SANITIZER_INTERFACE_ATTRIBUTE int __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
@@ -123,6 +119,29 @@ extern "C" {
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr *__asan_test_only_reported_buggy_pointer;
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memcpy(void *dst, const void *src, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memset(void *s, int c, uptr n);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memmove(void* dest, const void* src, uptr n);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
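
The new __asan_load*/__asan_store* entry points let instrumented code perform access checks through function calls instead of inlined shadow tests. The check follows the usual shadow-byte convention; below is a self-contained simulation that uses a small fake shadow array and assumes the access stays within one 8-byte granule, as ASan's fast path does for sizes up to 8:

#include <cstdio>

typedef unsigned long long uptr;     // stand-in for the sanitizer's uptr

static signed char fake_shadow[16];  // one shadow byte per 8 "application" bytes

// Shadow byte 0: the whole granule is addressable; 1..7: only the first k
// bytes are; negative values mark redzones.
static bool AccessIsOk(uptr addr, uptr size) {
  signed char s = fake_shadow[addr >> 3];
  if (s == 0) return true;
  if (s < 0) return false;
  uptr last = (addr & 7) + size - 1;   // last accessed byte inside the granule
  return last < (uptr)s;
}

int main() {
  fake_shadow[1] = 4;                       // bytes [8..11] usable, [12..15] not
  std::printf("%d\n", AccessIsOk(8, 4));    // 1
  std::printf("%d\n", AccessIsOk(10, 4));   // 0: touches byte 13
  std::printf("%d\n", AccessIsOk(0, 8));    // 1: shadow byte 0
}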
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index ede273a71701..d56943a08389 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -28,26 +28,11 @@
// Build-time configuration options.
-// If set, asan will install its own SEGV signal handler.
-#ifndef ASAN_NEEDS_SEGV
-# if SANITIZER_ANDROID == 1
-# define ASAN_NEEDS_SEGV 0
-# else
-# define ASAN_NEEDS_SEGV 1
-# endif
-#endif
-
// If set, asan will intercept C++ exception api call(s).
#ifndef ASAN_HAS_EXCEPTIONS
# define ASAN_HAS_EXCEPTIONS 1
#endif
-// If set, asan uses the values of SHADOW_SCALE and SHADOW_OFFSET
-// provided by the instrumented objects. Otherwise constants are used.
-#ifndef ASAN_FLEXIBLE_MAPPING_AND_OFFSET
-# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
-#endif
-
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
@@ -62,32 +47,41 @@
# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
#endif
+#ifndef ASAN_DYNAMIC
+# ifdef PIC
+# define ASAN_DYNAMIC 1
+# else
+# define ASAN_DYNAMIC 0
+# endif
+#endif
+
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
-// Seperate namespace also makes it simpler to distinguish the asan run-time
+// Separate namespace also makes it simpler to distinguish the asan run-time
// functions from the instrumented user code in a profile.
namespace __asan {
class AsanThread;
using __sanitizer::StackTrace;
+void AsanInitFromRtl();
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
-void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
+void AsanCheckDynamicRTPrereqs();
+void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
+void AsanOnSIGSEGV(int, void *siginfo, void *context);
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
-void SetAlternateSignalStack();
-void UnsetAlternateSignalStack();
-void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@@ -100,7 +94,9 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
-// Platfrom-specific options.
+void ParseExtraActivationFlags();
+
+// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 0692eb1f455d..d893b2334d50 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -11,11 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -30,12 +31,41 @@
#include <unistd.h>
#include <unwind.h>
-#if !SANITIZER_ANDROID
-// FIXME: where to get ucontext on Android?
-#include <sys/ucontext.h>
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
#endif
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <ucontext.h>
extern "C" void* _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <dlfcn.h>
+#include <link.h>
+#endif
+
+// x86_64 FreeBSD 9.2 and older define 64-bit register names in both 64-bit
+// and 32-bit modes.
+#if SANITIZER_FREEBSD
+#include <sys/param.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# define mc_eip mc_rip
+# define mc_ebp mc_rbp
+# define mc_esp mc_rsp
+# endif
+#endif
+
+typedef enum {
+ ASAN_RT_VERSION_UNDEFINED = 0,
+ ASAN_RT_VERSION_DYNAMIC,
+ ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t __asan_rt_version;
+}
namespace __asan {
@@ -48,38 +78,115 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if SANITIZER_ANDROID
- *pc = *sp = *bp = 0;
-#elif defined(__arm__)
+// FIXME: should we do anything for Android?
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+#else
+static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Continue until the first dynamic library is found
+ if (!info->dlpi_name || info->dlpi_name[0] == 0)
+ return 0;
+
+ *(const char **)data = info->dlpi_name;
+ return 1;
+}
+
+static bool IsDynamicRTName(const char *libname) {
+ return internal_strstr(libname, "libclang_rt.asan") ||
+ internal_strstr(libname, "libasan.so");
+}
+
+static void ReportIncompatibleRT() {
+ Report("Your application is linked against incompatible ASan runtimes.\n");
+ Die();
+}
+
+void AsanCheckDynamicRTPrereqs() {
+ // Ensure that dynamic RT is the first DSO in the list
+ const char *first_dso_name = 0;
+ dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
+ if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+ Report("ASan runtime does not come first in initial library list; "
+ "you should either link runtime to your application or "
+ "manually preload it with LD_PRELOAD.\n");
+ Die();
+ }
+}
+
+void AsanCheckIncompatibleRT() {
+ if (ASAN_DYNAMIC) {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ __asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
+ ReportIncompatibleRT();
+ }
+ } else {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ // Ensure that dynamic runtime is not present. We should detect it
+ // as early as possible, otherwise ASan interceptors could bind to
+ // the functions in dynamic ASan runtime instead of the functions in
+ // system libraries, causing crashes later in ASan initialization.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ char filename[128];
+ while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) {
+ if (IsDynamicRTName(filename)) {
+ Report("Your application is linked against "
+ "incompatible ASan runtimes.\n");
+ Die();
+ }
+ }
+ __asan_rt_version = ASAN_RT_VERSION_STATIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
+ ReportIncompatibleRT();
+ }
+ }
+}
+#endif // SANITIZER_ANDROID
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
-# elif defined(__hppa__)
+#elif defined(__aarch64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.regs[29];
+ *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_rip;
+ *bp = ucontext->uc_mcontext.mc_rbp;
+ *sp = ucontext->uc_mcontext.mc_rsp;
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
-# elif defined(__i386__)
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_eip;
+ *bp = ucontext->uc_mcontext.mc_ebp;
+ *sp = ucontext->uc_mcontext.mc_esp;
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
-# elif defined(__powerpc__) || defined(__powerpc64__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.regs->nip;
- *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
- // The powerpc{,64}-linux ABIs do not specify r31 as the frame
- // pointer, but GCC always uses r31 when we need a frame pointer.
- *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
-# elif defined(__sparc__)
+# endif
+#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
# if defined (__arch64__)
@@ -93,7 +200,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
-# elif defined(__mips__)
+#elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
@@ -104,7 +211,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
}
bool AsanInterceptsSignal(int signum) {
- return signum == SIGSEGV && flags()->handle_segv;
+ return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@@ -125,4 +232,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
} // namespace __asan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
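
AsanCheckDynamicRTPrereqs() above walks the loaded objects with dl_iterate_phdr() and insists that the dynamic ASan runtime comes first, since otherwise interceptors may bind to the wrong definitions. The same iteration pattern as a standalone Linux/FreeBSD program; the printed output is purely illustrative:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <link.h>
#include <stddef.h>
#include <cstdio>

// Stop at the first object whose name is non-empty; the empty name normally
// belongs to the main executable itself.
static int FirstDsoCallback(struct dl_phdr_info *info, size_t, void *data) {
  if (!info->dlpi_name || info->dlpi_name[0] == '\0')
    return 0;                                   // keep iterating
  *static_cast<const char **>(data) = info->dlpi_name;
  return 1;                                     // non-zero stops the walk
}

int main() {
  const char *first = 0;
  dl_iterate_phdr(FirstDsoCallback, &first);
  std::printf("first DSO: %s\n", first ? first : "(none)");
  // The runtime check additionally matches this name against
  // "libclang_rt.asan" / "libasan.so" and aborts on a mismatch.
}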
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index 8d01843afafb..4a295e0e355a 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -15,12 +15,12 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mac.h"
#include <crt_externs.h> // for _NSGetArgv
#include <dlfcn.h> // for dladdr()
@@ -51,43 +51,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
-MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
-
-MacosVersion GetMacosVersionInternal() {
- int mib[2] = { CTL_KERN, KERN_OSRELEASE };
- char version[100];
- uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
- for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
- // Get the version length.
- CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
- CHECK_LT(len, maxlen);
- CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
- switch (version[0]) {
- case '9': return MACOS_VERSION_LEOPARD;
- case '1': {
- switch (version[1]) {
- case '0': return MACOS_VERSION_SNOW_LEOPARD;
- case '1': return MACOS_VERSION_LION;
- case '2': return MACOS_VERSION_MOUNTAIN_LION;
- case '3': return MACOS_VERSION_MAVERICKS;
- default: return MACOS_VERSION_UNKNOWN;
- }
- }
- default: return MACOS_VERSION_UNKNOWN;
- }
-}
-
-MacosVersion GetMacosVersion() {
- atomic_uint32_t *cache =
- reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
- MacosVersion result =
- static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
- if (result == MACOS_VERSION_UNINITIALIZED) {
- result = GetMacosVersionInternal();
- atomic_store(cache, result, memory_order_release);
- }
- return result;
-}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
@@ -172,12 +135,10 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
- if (common_flags()->verbosity >= 1) {
- Report("exec()-ing the program with\n");
- Report("%s=%s\n", kDyldInsertLibraries, new_env);
- Report("to enable ASan wrappers.\n");
- Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
- }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable ASan wrappers.\n");
+ VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
@@ -236,8 +197,15 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckDynamicRTPrereqs() {}
+
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckIncompatibleRT() {}
+
bool AsanInterceptsSignal(int signum) {
- return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
+ return (signum == SIGSEGV || signum == SIGBUS) &&
+ common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@@ -309,11 +277,10 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
- if (common_flags()->verbosity >= 2) {
- Report("asan_dispatch_call_block_and_release(): "
- "context: %p, pthread_self: %p\n",
- block, pthread_self());
- }
+ VReport(2,
+ "asan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -347,10 +314,10 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
if (common_flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
- PRINT_CURRENT_STACK(); \
- } \
- return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
- asan_dispatch_call_block_and_release); \
+ PRINT_CURRENT_STACK(); \
+ } \
+ return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
+ asan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
@@ -386,7 +353,6 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
-// FIXME: consolidate these declarations with asan_intercepted_functions.h.
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
diff --git a/libsanitizer/asan/asan_mac.h b/libsanitizer/asan/asan_mac.h
deleted file mode 100644
index 2d1d4b0bfb33..000000000000
--- a/libsanitizer/asan/asan_mac.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- asan_mac.h ----------------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Mac-specific ASan definitions.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_MAC_H
-#define ASAN_MAC_H
-
-// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
-// and subject to change in further CoreFoundation versions. Apple does not
-// guarantee any binary compatibility from release to release.
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
-#if defined(__BIG_ENDIAN__)
-#define CF_RC_BITS 0
-#endif
-
-#if defined(__LITTLE_ENDIAN__)
-#define CF_RC_BITS 3
-#endif
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
-typedef struct __CFRuntimeBase {
- uptr _cfisa;
- u8 _cfinfo[4];
-#if __LP64__
- u32 _rc;
-#endif
-} CFRuntimeBase;
-
-enum MacosVersion {
- MACOS_VERSION_UNINITIALIZED = 0,
- MACOS_VERSION_UNKNOWN,
- MACOS_VERSION_LEOPARD,
- MACOS_VERSION_SNOW_LEOPARD,
- MACOS_VERSION_LION,
- MACOS_VERSION_MOUNTAIN_LION,
- MACOS_VERSION_MAVERICKS
-};
-
-// Used by asan_malloc_mac.cc and asan_mac.cc
-extern "C" void __CFInitialize();
-
-namespace __asan {
-
-MacosVersion GetMacosVersion();
-void MaybeReplaceCFAllocator();
-
-} // namespace __asan
-
-#endif // ASAN_MAC_H
diff --git a/libsanitizer/asan/asan_malloc_linux.cc b/libsanitizer/asan/asan_malloc_linux.cc
index e3495cb09006..ba908e322d93 100644
--- a/libsanitizer/asan/asan_malloc_linux.cc
+++ b/libsanitizer/asan/asan_malloc_linux.cc
@@ -13,8 +13,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
@@ -74,7 +75,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -99,8 +100,12 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
-INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
- ALIAS("memalign");
+INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ DTLS_on_libc_memalign(res, size * boundary);
+ return res;
+}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
@@ -146,4 +151,4 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index 342e806e3b67..6a93ce1e8082 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -22,10 +22,10 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
@@ -39,7 +39,7 @@ static malloc_zone_t asan_zone;
INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
@@ -58,34 +58,34 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
}
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
// FIXME: ASan should support purgeable allocations.
// https://code.google.com/p/address-sanitizer/issues/detail?id=139
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
}
INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
// Must return 0 if the contents were not purged since the last call to
// malloc_make_purgeable().
return 0;
}
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0);
InternalScopedBuffer<char> new_name(buflen);
@@ -100,44 +100,44 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
}
INTERCEPTOR(void *, malloc, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
void *res = asan_malloc(size, &stack);
return res;
}
INTERCEPTOR(void, free, void *ptr) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
if (!ptr) return;
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, valloc, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}
INTERCEPTOR(size_t, malloc_good_size, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return asan_zone.introspect->good_size(&asan_zone, size);
}
INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
CHECK(memptr);
GET_STACK_TRACE_MALLOC;
void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
@@ -157,7 +157,7 @@ size_t mz_size(malloc_zone_t* zone, const void* ptr) {
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
@@ -166,7 +166,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
}
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -182,7 +182,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
}
void *mz_valloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
@@ -240,7 +240,7 @@ void mz_destroy(malloc_zone_t* zone) {
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}
diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc
index 1f2495ffc509..8463d5ef2e94 100644
--- a/libsanitizer/asan/asan_malloc_win.cc
+++ b/libsanitizer/asan/asan_malloc_win.cc
@@ -17,7 +17,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
-#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_interception.h"
#include <stddef.h>
@@ -101,6 +101,21 @@ size_t _msize(void *ptr) {
return asan_malloc_usable_size(ptr, pc, bp);
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void *_expand(void *memblock, size_t size) {
+ // _expand is used in realloc-like functions to resize the buffer if possible.
+ // We don't want memory to stand still while resizing buffers, so return 0.
+ return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *_expand_dbg(void *memblock, size_t size) {
+ return 0;
+}
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
int _CrtDbgReport(int, const char*, int,
const char*, const char*, ...) {
ShowStatsAndAbort();
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 1e37bc26e945..86e391f5968d 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -41,54 +41,81 @@
// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
//
-// Default Linux/i386 mapping:
+// Default Linux/i386 mapping on x86_64 machine:
// || `[0x40000000, 0xffffffff]` || HighMem ||
// || `[0x28000000, 0x3fffffff]` || HighShadow ||
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
+// Default Linux/i386 mapping on i386 machine
+// (addresses starting with 0xc0000000 are reserved
+// for kernel and thus not sanitized):
+// || `[0x38000000, 0xbfffffff]` || HighMem ||
+// || `[0x27000000, 0x37ffffff]` || HighShadow ||
+// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
// Default Linux/MIPS mapping:
// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
+// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
+// || `[0x60000000, 0xffffffff]` || HighMem ||
+// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
+// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
+// || `[0x40000000, 0x47ffffff]` || LowShadow ||
+// || `[0x00000000, 0x3fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29;
+static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
-# define SHADOW_SCALE (__asan_mapping_scale)
-# define SHADOW_OFFSET (__asan_mapping_offset)
+#define SHADOW_SCALE kDefaultShadowScale
+#if SANITIZER_ANDROID
+# define SHADOW_OFFSET (0)
#else
-# define SHADOW_SCALE kDefaultShadowScale
-# if SANITIZER_ANDROID
-# define SHADOW_OFFSET (0)
+# if SANITIZER_WORDSIZE == 32
+# if defined(__mips__)
+# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# else
+# if SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset32
+# else
+# define SHADOW_OFFSET kDefaultShadowOffset32
+# endif
+# endif
# else
-# if SANITIZER_WORDSIZE == 32
-# if defined(__mips__)
-# define SHADOW_OFFSET kMIPS32_ShadowOffset32
-# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
-# endif
+# if defined(__aarch64__)
+# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# elif SANITIZER_MAC
+# define SHADOW_OFFSET kDefaultShadowOffset64
# else
-# if defined(__powerpc64__)
-# define SHADOW_OFFSET kPPC64_ShadowOffset64
-# elif SANITIZER_MAC
-# define SHADOW_OFFSET kDefaultShadowOffset64
-# else
-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
-# endif
+# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
-#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
+#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
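
With ASAN_FLEXIBLE_MAPPING_AND_OFFSET removed, SHADOW_SCALE and SHADOW_OFFSET are fixed at compile time per platform, and MEM_TO_SHADOW stays a single shift-and-add. A quick numeric sketch using two of the 64-bit offsets listed above (the application address is chosen arbitrarily):

#include <cstdio>

typedef unsigned long long uptr;   // stand-in for the sanitizer's uptr

// MEM_TO_SHADOW(mem) == (mem >> SHADOW_SCALE) + SHADOW_OFFSET with scale 3.
static uptr MemToShadow(uptr addr, uptr offset) {
  return (addr >> 3) + offset;
}

int main() {
  const uptr kLinux64   = 0x7fff8000ULL;        // kDefaultShort64bitShadowOffset
  const uptr kFreeBsd64 = 1ULL << 46;           // kFreeBSD_ShadowOffset64
  uptr addr = 0x603000000010ULL;
  std::printf("linux/x86_64 shadow:   0x%llx\n", MemToShadow(addr, kLinux64));
  std::printf("freebsd/x86_64 shadow: 0x%llx\n", MemToShadow(addr, kFreeBsd64));
}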
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index beac8cdbdd5e..a1ab2cd8c39e 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -14,20 +14,14 @@
#include "asan_internal.h"
#include "asan_stack.h"
-#include <stddef.h>
+#include "sanitizer_common/sanitizer_interception.h"
-namespace __asan {
-// This function is a no-op. We need it to make sure that object file
-// with our replacements will actually be loaded from static ASan
-// run-time library at link-time.
-void ReplaceOperatorsNewAndDelete() { }
-}
+#include <stddef.h>
using namespace __asan; // NOLINT
-// On Android new() goes through malloc interceptors.
-// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
-#if !SANITIZER_ANDROID
+// This code has issues on OSX.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
@@ -46,6 +40,15 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
+// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
@@ -78,15 +81,21 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
+void operator delete(void *ptr) throw() {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+void operator delete[](void *ptr) throw() {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
@@ -102,5 +111,3 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
-
-#endif
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 86d49909b68f..a532c5c4388e 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -11,6 +11,8 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -48,6 +50,15 @@ struct ShadowSegmentEndpoint {
}
};
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ uptr page_size = GetPageSizeCached();
+ uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
+ uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+}
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -67,10 +78,8 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to poison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -109,10 +118,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to unpoison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -137,7 +144,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
}
-bool __asan_address_is_poisoned(void const volatile *addr) {
+int __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
@@ -146,6 +153,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
+ CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
@@ -243,14 +251,12 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("poisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("unpoisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
@@ -258,33 +264,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
- if (common_flags()->verbosity >= 2)
- Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
- new_mid_p);
+ if (!flags()->detect_container_overflow) return;
+ VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
+ new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
- uptr end= reinterpret_cast<uptr>(end_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
- CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
- IsAligned(beg, granularity));
+ if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
+ IsAligned(beg, granularity))) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
+ &stack);
+ }
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
- uptr d2 = RoundUpTo(old_mid, granularity);
+ // uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
- if (d1 != d2)
- CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+ //
+ // FIXME: Two of these three checks are disabled until we fix
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
+ // if (d1 != d2)
+ // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
- if (d2 + granularity <= c && c <= end)
- CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
- kAsanContiguousContainerOOBMagic);
+ // if (d2 + granularity <= c && c <= end)
+ // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
@@ -297,3 +310,42 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}
+
+int __sanitizer_verify_contiguous_container(const void *beg_p,
+ const void *mid_p,
+ const void *end_p) {
+ if (!flags()->detect_container_overflow) return 1;
+ uptr beg = reinterpret_cast<uptr>(beg_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
+ uptr mid = reinterpret_cast<uptr>(mid_p);
+ CHECK_LE(beg, mid);
+ CHECK_LE(mid, end);
+ // Check some bytes starting from beg, some bytes around mid, and some bytes
+ // ending with end.
+ uptr kMaxRangeToCheck = 32;
+ uptr r1_beg = beg;
+ uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
+ uptr r2_end = Min(end, mid + kMaxRangeToCheck);
+ uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
+ uptr r3_end = end;
+ for (uptr i = r1_beg; i < r1_end; i++)
+ if (AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = r2_beg; i < mid; i++)
+ if (AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = mid; i < r2_end; i++)
+ if (!AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = r3_beg; i < r3_end; i++)
+ if (!AddressIsPoisoned(i))
+ return 0;
+ return 1;
+}
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
+}
+}
diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h
index da79a0ff2e40..326d9ba1b67b 100644
--- a/libsanitizer/asan/asan_poisoning.h
+++ b/libsanitizer/asan/asan_poisoning.h
@@ -13,6 +13,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@@ -32,10 +33,35 @@ void PoisonShadowPartialRightRedzone(uptr addr,
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
+ uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
- REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ // FIXME: Page states are different on Windows, so using the same interface
+ // for mapping shadow and zeroing out pages doesn't "just work", so we should
+  // probably provide a higher-level interface for these operations.
+ // For now, just memset on Windows.
+ if (value ||
+ SANITIZER_WINDOWS == 1 ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+ uptr page_end = RoundDownTo(shadow_end, PageSize);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
+ CHECK_EQ(page_beg, res);
+ }
+ }
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
@@ -55,4 +81,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
+// Calls __sanitizer::FlushUnneededShadowMemory() on
+// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+void FlushUnneededASanShadowMemory(uptr p, uptr size);
+
} // namespace __asan
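
The else branch above trades a huge memset for fresh zero pages. A rough standalone sketch of the same edge/middle split, using plain POSIX mmap in place of MmapFixedNoReserve and a hard-coded 4K page size (both are assumptions made for illustration, not the runtime's values):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const uintptr_t kPageSize = 4096;  // assumed; the runtime queries it

    static uintptr_t RoundUp(uintptr_t x, uintptr_t a)   { return (x + a - 1) & ~(a - 1); }
    static uintptr_t RoundDown(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }

    // Zero [beg, end): memset the unaligned edges, then replace the
    // page-aligned middle with fresh anonymous pages instead of touching
    // every byte of it.
    void ZeroRange(uintptr_t beg, uintptr_t end) {
      uintptr_t page_beg = RoundUp(beg, kPageSize);
      uintptr_t page_end = RoundDown(end, kPageSize);
      if (page_beg >= page_end) {  // less than one full page: plain memset
        memset(reinterpret_cast<void *>(beg), 0, end - beg);
        return;
      }
      if (page_beg != beg)
        memset(reinterpret_cast<void *>(beg), 0, page_beg - beg);
      if (page_end != end)
        memset(reinterpret_cast<void *>(page_end), 0, end - page_end);
      void *res = mmap(reinterpret_cast<void *>(page_beg), page_end - page_beg,
                       PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                       -1, 0);
      assert(res == reinterpret_cast<void *>(page_beg));
      (void)res;
    }

    int main() {
      size_t len = 8 * kPageSize;
      void *mem = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) return 1;
      char *p = static_cast<char *>(mem);
      memset(p, 0xbe, len);
      ZeroRange(reinterpret_cast<uintptr_t>(p) + 3,
                reinterpret_cast<uintptr_t>(p) + len - 5);
      return p[10] == 0 ? 0 : 1;
    }
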
diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc
index ac4ec9e01911..8f3798a2e595 100644
--- a/libsanitizer/asan/asan_posix.cc
+++ b/libsanitizer/asan/asan_posix.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "asan_internal.h"
#include "asan_interceptors.h"
@@ -28,70 +28,27 @@
#include <sys/resource.h>
#include <unistd.h>
-static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
-
namespace __asan {
-static void MaybeInstallSigaction(int signum,
- void (*handler)(int, siginfo_t *, void *)) {
- if (!AsanInterceptsSignal(signum))
- return;
- struct sigaction sigact;
- REAL(memset)(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = handler;
- sigact.sa_flags = SA_SIGINFO;
- if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
- CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
- if (common_flags()->verbosity >= 1) {
- Report("Installed the sigaction for signal %d\n", signum);
- }
-}
-
-static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
- uptr addr = (uptr)siginfo->si_addr;
+void AsanOnSIGSEGV(int, void *siginfo, void *context) {
+ uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+ int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
uptr pc, sp, bp;
GetPcSpBp(context, &pc, &sp, &bp);
- ReportSIGSEGV(pc, sp, bp, addr);
-}
-
-void SetAlternateSignalStack() {
- stack_t altstack, oldstack;
- CHECK_EQ(0, sigaltstack(0, &oldstack));
- // If the alternate stack is already in place, do nothing.
- if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
- // TODO(glider): the mapped stack should have the MAP_STACK flag in the
- // future. It is not required by man 2 sigaltstack now (they're using
- // malloc()).
- void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
- altstack.ss_sp = base;
- altstack.ss_flags = 0;
- altstack.ss_size = kAltStackSize;
- CHECK_EQ(0, sigaltstack(&altstack, 0));
- if (common_flags()->verbosity > 0) {
- Report("Alternative stack for T%d set: [%p,%p)\n",
- GetCurrentTidOrInvalid(),
- altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
- }
-}
-
-void UnsetAlternateSignalStack() {
- stack_t altstack, oldstack;
- altstack.ss_sp = 0;
- altstack.ss_flags = SS_DISABLE;
- altstack.ss_size = 0;
- CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
- UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
-}
-void InstallSignalHandlers() {
- // Set the alternate signal stack for the main thread.
- // This will cause SetAlternateSignalStack to be called twice, but the stack
- // will be actually set only once.
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
- MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
- MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
+  // An access at a reasonable offset above SP, or slightly below it (to
+  // account for the x86_64 redzone, an ARM push of multiple registers, etc.),
+  // is probably a stack overflow.
+  // We also check si_code to filter out SEGV caused by something other than
+  // hitting the guard page or unmapped memory, such as an unaligned memory
+  // access.
+ if (addr + 128 > sp && addr < sp + 0xFFFF &&
+ (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
+ ReportStackOverflow(pc, sp, bp, context, addr);
+ else
+ ReportSIGSEGV(pc, sp, bp, context, addr);
}
// ---------------------- TSD ---------------- {{{1
@@ -125,4 +82,4 @@ void PlatformTSDDtor(void *tsd) {
}
} // namespace __asan
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
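
The classification above reduces to a small predicate; restated on its own, as an illustration only, reusing the same constants and si_code values:

    #include <csignal>
    #include <cstdint>

    // True if a SIGSEGV at fault_addr, with stack pointer sp and the given
    // si_code, is probably the thread running off the end of its stack: the
    // fault is just below SP (x86_64 redzone, ARM multi-register push) or
    // within ~64K above it, and was caused by unmapped or guarded memory
    // rather than, say, an unaligned access.
    bool LooksLikeStackOverflow(uintptr_t fault_addr, uintptr_t sp, int code) {
      bool near_sp = fault_addr + 128 > sp && fault_addr < sp + 0xFFFF;
      bool guard_like = (code == SEGV_MAPERR || code == SEGV_ACCERR);
      return near_sp && guard_like;
    }
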
diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc
index 70c4b481a2f1..d0a89b9677e0 100644
--- a/libsanitizer/asan/asan_report.cc
+++ b/libsanitizer/asan/asan_report.cc
@@ -43,11 +43,9 @@ void AppendToErrorMessageBuffer(const char *buffer) {
}
// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
@@ -89,68 +87,77 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintShadowByte(const char *before, u8 byte,
- const char *after = "\n") {
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
Decorator d;
- Printf("%s%s%x%x%s%s", before,
- d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
+ str->append("%s%s%x%x%s%s", before, d.ShadowByte(byte), byte >> 4, byte & 15,
+ d.EndShadowByte(), after);
}
-static void PrintShadowBytes(const char *before, u8 *bytes,
- u8 *guilty, uptr n) {
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
- if (before)
- Printf("%s%p:", before, bytes);
+ if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
- const char *before = p == guilty ? "[" :
- (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
- PrintShadowByte(before, *p, after);
+ PrintShadowByte(str, before, *p, after);
}
- Printf("\n");
-}
-
-static void PrintLegend() {
- Printf("Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n", (int)SHADOW_GRANULARITY);
- PrintShadowByte(" Addressable: ", 0);
- Printf(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
- PrintShadowByte("", i, " ");
- Printf("\n");
- PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
- PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
- PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
- PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
- PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
- PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
- PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
- PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
- PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(" Contiguous container OOB:",
+ str->append("\n");
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Heap right redzone: ",
+ kAsanHeapRightRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack partial redzone: ",
+ kAsanStackPartialRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
- PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
}
static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr))
- return;
+ if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- Printf("Shadow bytes around the buggy address:\n");
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(prefix,
- (u8*)(aligned_shadow + i * n_bytes_per_row),
- (u8*)shadow_addr, n_bytes_per_row);
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
}
- if (flags()->print_legend)
- PrintLegend();
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@@ -182,20 +189,25 @@ static bool IsASCII(unsigned char c) {
static const char *MaybeDemangleGlobalName(const char *name) {
  // We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
- return (name[0] == '_' && name[1] == 'Z')
- ? Symbolizer::Get()->Demangle(name)
- : name;
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::Get()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(const __asan_global &g) {
+static void PrintGlobalNameIfASCII(InternalScopedString *str,
+ const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char*)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char*)(g.beg + g.size - 1) != '\0') return;
- Printf(" '%s' is ascii string '%s'\n",
- MaybeDemangleGlobalName(g.name), (char*)g.beg);
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
}
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
@@ -203,23 +215,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
static const uptr kMinimalDistanceFromAnotherGlobal = 64;
if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
if (addr >= g.beg + g.size_with_redzone) return false;
+ InternalScopedString str(4096);
Decorator d;
- Printf("%s", d.Location());
+ str.append("%s", d.Location());
if (addr < g.beg) {
- Printf("%p is located %zd bytes to the left", (void*)addr, g.beg - addr);
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
} else if (addr + size > g.beg + g.size) {
if (addr < g.beg + g.size)
addr = g.beg + g.size;
- Printf("%p is located %zd bytes to the right", (void*)addr,
- addr - (g.beg + g.size));
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
} else {
// Can it happen?
- Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg);
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
- Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
+ str.append(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
- Printf("%s", d.EndLocation());
- PrintGlobalNameIfASCII(g);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
return true;
}
@@ -288,16 +303,18 @@ void PrintAccessAndVarIntersection(const char *var_name,
addr - prev_var_end >= var_beg - addr_end)
pos_descr = "underflows";
}
- Printf(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
- Printf("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
} else {
- Printf("\n");
+ str.append("\n");
}
+ Printf("%s", str.data());
}
struct StackVarDescr {
@@ -346,7 +363,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
alloca_stack.trace[0] = frame_pc + 16;
alloca_stack.size = 1;
Printf("%s", d.EndLocation());
- PrintStack(&alloca_stack);
+ alloca_stack.Print();
// Report the number of stack objects.
char *p;
uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
@@ -394,24 +411,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
uptr access_size) {
sptr offset;
Decorator d;
- Printf("%s", d.Location());
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes to the left of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
} else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
if (offset < 0) {
addr -= offset;
offset = 0;
}
- Printf("%p is located %zd bytes to the right of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
} else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes inside of", (void*)addr, offset);
+ str.append("%p is located %zd bytes inside of", (void*)addr, offset);
} else {
- Printf("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void*)addr);
+ str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)addr);
}
- Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void*)(chunk.Beg()), (void*)(chunk.End()));
- Printf("%s", d.EndLocation());
+ str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
+ (void *)(chunk.Beg()), (void *)(chunk.End()));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
@@ -439,7 +458,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
d.EndAllocation());
StackTrace free_stack;
chunk.GetFreeStack(&free_stack);
- PrintStack(&free_stack);
+ free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
@@ -450,7 +469,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
- PrintStack(&alloc_stack);
+ alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread)
DescribeThread(free_thread);
@@ -481,15 +500,16 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
char tname[128];
- Printf("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- Printf(" created by T%d%s here:\n",
- context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid,
- tname, sizeof(tname)));
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
uptr stack_size;
const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
- PrintStack(stack_trace, stack_size);
+ StackTrace::PrintStack(stack_trace, stack_size);
  // Recursively describe parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@@ -539,6 +559,8 @@ class ScopedInErrorReport {
NORETURN ~ScopedInErrorReport() {
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
+ // We may want to grab this lock again when printing stats.
+ asanThreadRegistry().Unlock();
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
@@ -550,17 +572,33 @@ class ScopedInErrorReport {
}
};
-void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
+void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
- " (pc %p sp %p bp %p T%d)\n",
- (void*)addr, (void*)pc, (void*)sp, (void*)bp,
- GetCurrentTidOrInvalid());
+ Report(
+ "ERROR: AddressSanitizer: stack-overflow on address %p"
+ " (pc %p sp %p bp %p T%d)\n",
+ (void *)addr, (void *)pc, (void *)sp, (void *)bp,
+ GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ stack.Print();
+ ReportErrorSummary("stack-overflow", &stack);
+}
+
+void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: SEGV on unknown address %p"
+ " (pc %p sp %p bp %p T%d)\n",
+ (void *)addr, (void *)pc, (void *)sp, (void *)bp,
+ GetCurrentTidOrInvalid());
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ stack.Print();
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack);
}
@@ -578,7 +616,7 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("double-free", &stack);
}
@@ -595,7 +633,7 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-free", &stack);
}
@@ -616,7 +654,7 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("alloc-dealloc-mismatch", &stack);
Report("HINT: if you don't care about these warnings you may set "
@@ -631,7 +669,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
"malloc_usable_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-malloc_usable_size", stack);
}
@@ -644,7 +682,7 @@ void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
"__asan_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-__asan_get_allocated_size", stack);
}
@@ -661,12 +699,81 @@ void ReportStringFunctionMemoryRangesOverlap(
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeAddress((uptr)offset1, length1);
DescribeAddress((uptr)offset2, length2);
ReportErrorSummary(bug_type, stack);
}
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ StackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ const char *bug_type = "negative-size-param";
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ DescribeAddress(offset, size);
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ StackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Report("ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ stack->Print();
+ ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+}
+
+void ReportODRViolation(const __asan_global *g1, const __asan_global *g2) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
+ Printf("%s", d.EndWarning());
+ Printf(" [1] size=%zd %s %s\n", g1->size, g1->name, g1->module_name);
+ Printf(" [2] size=%zd %s %s\n", g2->size, g2->name, g2->module_name);
+ Report("HINT: if you don't care about these warnings you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ ReportErrorSummary("odr-violation", g1->module_name, 0, g1->name);
+}
+
+// ----------------------- CheckForInvalidPointerPair ----------- {{{1
+static NOINLINE void
+ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ DescribeAddress(a1, 1);
+ DescribeAddress(a2, 1);
+ ReportErrorSummary("invalid-pointer-pair", &stack);
+}
+
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+ if (!flags()->detect_invalid_pointer_pairs) return;
+ uptr a1 = reinterpret_cast<uptr>(p1);
+ uptr a2 = reinterpret_cast<uptr>(p2);
+ AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
+ AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
+ bool valid1 = chunk1.IsValid();
+ bool valid2 = chunk2.IsValid();
+ if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
+    GET_CALLER_PC_BP_SP;
+ return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+ }
+}
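
A hypothetical way to trigger the new check, assuming a compiler that instruments pointer comparisons with calls to __sanitizer_ptr_cmp and a run with ASAN_OPTIONS=detect_invalid_pointer_pairs=1; without that instrumentation the program below simply runs silently:

    #include <cstdio>

    int main() {
      int *a = new int[10];
      int *b = new int[10];
      // a and b live in different heap chunks, so ordering them is not
      // meaningful; with the check enabled, ASan reports invalid-pointer-pair
      // at this comparison.
      if (a < b)
        printf("a is at the lower address\n");
      delete[] a;
      delete[] b;
      return 0;
    }
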
// ----------------------- Mac-specific reports ----------------- {{{1
void WarnMacFreeUnallocated(
@@ -676,7 +783,7 @@ void WarnMacFreeUnallocated(
"AddressSanitizer is ignoring this error on Mac OS now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
@@ -687,7 +794,7 @@ void ReportMacMzReallocUnknown(
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
@@ -698,7 +805,7 @@ void ReportMacCfReallocUnknown(
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
@@ -707,8 +814,8 @@ void ReportMacCfReallocUnknown(
// --------------------------- Interface --------------------- {{{1
using namespace __asan; // NOLINT
-void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size) {
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size) {
ScopedInErrorReport in_report;
// Determine the error type.
@@ -774,7 +881,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
d.EndAccess());
GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ stack.Print();
DescribeAddress(addr, access_size);
ReportErrorSummary(bug_descr, &stack);
@@ -786,7 +893,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
if (callback) {
error_message_buffer_size = 1 << 16;
error_message_buffer =
- (char*)MmapOrDie(error_message_buffer_size, __FUNCTION__);
+ (char*)MmapOrDie(error_message_buffer_size, __func__);
error_message_buffer_pos = 0;
}
}
@@ -795,6 +902,17 @@ void __asan_describe_address(uptr addr) {
DescribeAddress(addr, 1);
}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_sub(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_cmp(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+} // extern "C"
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
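
The recurring change in this file is mechanical: text that used to go out line by line through Printf is now accumulated in an InternalScopedString and flushed once, so a report is far less likely to interleave with output from other threads. A rough stand-in for that pattern (class name and buffer handling are mine, not the runtime's):

    #include <cstdarg>
    #include <cstdio>
    #include <vector>

    class ScopedString {
     public:
      explicit ScopedString(size_t capacity) : buf_(capacity), len_(0) {
        buf_[0] = '\0';
      }
      void append(const char *format, ...) {
        va_list args;
        va_start(args, format);
        int n = vsnprintf(&buf_[len_], buf_.size() - len_, format, args);
        va_end(args);
        if (n > 0) {
          size_t room = buf_.size() - 1 - len_;
          len_ += (static_cast<size_t>(n) < room) ? static_cast<size_t>(n) : room;
        }
      }
      const char *data() const { return buf_.data(); }

     private:
      std::vector<char> buf_;
      size_t len_;
    };

    int main() {
      ScopedString str(4096);
      str.append("Shadow bytes around the buggy address:\n");
      for (int i = -2; i <= 2; i++)
        str.append("%s row %+d\n", i == 0 ? "=>" : "  ", i);
      fputs(str.data(), stdout);  // one write instead of one per line
      return 0;
    }
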
diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h
index e4c756e557fe..d9a0bca64233 100644
--- a/libsanitizer/asan/asan_report.h
+++ b/libsanitizer/asan/asan_report.h
@@ -30,7 +30,10 @@ void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
-void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
+void NORETURN
+ ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
+void NORETURN
+ ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
@@ -43,6 +46,14 @@ void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
void NORETURN ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack);
+void NORETURN
+ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack);
+void NORETURN
+ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid,
+ uptr new_mid, StackTrace *stack);
+
+void NORETURN
+ReportODRViolation(const __asan_global *g1, const __asan_global *g2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 537d40612aa5..00b4b95868e2 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -9,6 +9,7 @@
//
// Main file of the ASan run-time library.
//===----------------------------------------------------------------------===//
+#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
@@ -26,6 +27,7 @@
#include "lsan/lsan_common.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
+uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
namespace __asan {
@@ -49,7 +51,7 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
}
}
- if (flags()->coverage)
+ if (common_flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
@@ -60,8 +62,8 @@ static void AsanDie() {
static void AsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n",
- file, line, cond, (uptr)v1, (uptr)v2);
+ Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
PRINT_CURRENT_STACK();
Die();
@@ -76,7 +78,7 @@ static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
-static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
@@ -91,56 +93,154 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
-
- ParseFlag(str, &f->quarantine_size, "quarantine_size");
- ParseFlag(str, &f->redzone, "redzone");
+ // Please write meaningful flag descriptions when adding new flags.
+ ParseFlag(str, &f->quarantine_size, "quarantine_size",
+ "Size (in bytes) of quarantine used to detect use-after-free "
+ "errors. Lower value may reduce memory usage but increase the "
+ "chance of false negatives.");
+ ParseFlag(str, &f->redzone, "redzone",
+ "Minimal size (in bytes) of redzones around heap objects. "
+ "Requirement: redzone >= 16, is a power of two.");
+ ParseFlag(str, &f->max_redzone, "max_redzone",
+ "Maximal size (in bytes) of redzones around heap objects.");
CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
-
- ParseFlag(str, &f->debug, "debug");
- ParseFlag(str, &f->report_globals, "report_globals");
- ParseFlag(str, &f->check_initialization_order, "check_initialization_order");
-
- ParseFlag(str, &f->replace_str, "replace_str");
- ParseFlag(str, &f->replace_intrin, "replace_intrin");
- ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
+ CHECK(IsPowerOfTwo(f->max_redzone));
+
+ ParseFlag(str, &f->debug, "debug",
+ "If set, prints some debugging information and does additional checks.");
+ ParseFlag(str, &f->report_globals, "report_globals",
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).");
+
+ ParseFlag(str, &f->check_initialization_order,
+ "check_initialization_order",
+ "If set, attempts to catch initialization order issues.");
+
+ ParseFlag(str, &f->replace_str, "replace_str",
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.");
+
+ ParseFlag(str, &f->replace_intrin, "replace_intrin",
+    "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.");
+ ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
+ "Ignore invalid free() calls to work around some bugs. Used on OS X "
+ "only.");
ParseFlag(str, &f->detect_stack_use_after_return,
- "detect_stack_use_after_return");
- ParseFlag(str, &f->uar_stack_size_log, "uar_stack_size_log");
- ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
- ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte");
- ParseFlag(str, &f->exitcode, "exitcode");
- ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
- ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
- ParseFlag(str, &f->handle_segv, "handle_segv");
- ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
- ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
- ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
- ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
- ParseFlag(str, &f->abort_on_error, "abort_on_error");
- ParseFlag(str, &f->print_stats, "print_stats");
- ParseFlag(str, &f->print_legend, "print_legend");
- ParseFlag(str, &f->atexit, "atexit");
- ParseFlag(str, &f->coverage, "coverage");
- ParseFlag(str, &f->disable_core, "disable_core");
- ParseFlag(str, &f->allow_reexec, "allow_reexec");
- ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
- ParseFlag(str, &f->poison_heap, "poison_heap");
- ParseFlag(str, &f->poison_partial, "poison_partial");
- ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
- ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
- ParseFlag(str, &f->strict_init_order, "strict_init_order");
+ "detect_stack_use_after_return",
+ "Enables stack-use-after-return checking at run-time.");
+ ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
+ "Minimum fake stack size log.");
+ ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
+ "Maximum fake stack size log.");
+ ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
+    "Use mmap with 'noreserve' flag to allocate fake stack.");
+ ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.");
+ ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
+ "Value used to fill the newly allocated memory.");
+ ParseFlag(str, &f->exitcode, "exitcode",
+ "Override the program exit status if the tool found an error.");
+ ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.");
+ ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).");
+
+ ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
+ "Allows the users to work around the bug in Nvidia drivers prior to "
+ "295.*.");
+
+ ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
+ "If set, explicitly unmaps the (huge) shadow at exit.");
+ ParseFlag(str, &f->abort_on_error, "abort_on_error",
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.");
+ ParseFlag(str, &f->print_stats, "print_stats",
+ "Print various statistics after printing an error message or if "
+ "atexit=1.");
+ ParseFlag(str, &f->print_legend, "print_legend",
+ "Print the legend for the shadow bytes.");
+ ParseFlag(str, &f->atexit, "atexit",
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.");
+
+ ParseFlag(str, &f->disable_core, "disable_core",
+ "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
+ "dumping a 16T+ core file. "
+ "Ignored on OSes that don't dump core by default.");
+
+ ParseFlag(str, &f->allow_reexec, "allow_reexec",
+ "Allow the tool to re-exec the program. This may interfere badly with "
+ "the debugger.");
+
+ ParseFlag(str, &f->print_full_thread_history,
+ "print_full_thread_history",
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.");
+
+ ParseFlag(str, &f->poison_heap, "poison_heap",
+ "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+    "for benchmarking the allocator or the instrumentation.");
+
+ ParseFlag(str, &f->poison_partial, "poison_partial",
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.");
+
+ ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
+ "Report errors on malloc/delete, new/free, new/delete[], etc.");
+ ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.");
+
+ ParseFlag(str, &f->strict_init_order, "strict_init_order",
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.");
+
+ ParseFlag(str, &f->start_deactivated, "start_deactivated",
+ "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+ "poisoning) to reduce memory consumption as much as possible, and "
+ "restores them to original values when the first instrumented module is "
+ "loaded into the process. This is mainly intended to be used on "
+ "Android. ");
+
+ ParseFlag(str, &f->detect_invalid_pointer_pairs,
+ "detect_invalid_pointer_pairs",
+ "If non-zero, try to detect operations like <, <=, >, >= and - on "
+ "invalid pointer pairs (e.g. when pointers belong to different objects). "
+ "The bigger the value the harder we try.");
+
+ ParseFlag(str, &f->detect_container_overflow,
+ "detect_container_overflow",
+ "If true, honor the container overflow annotations. "
+ "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
+
+ ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
+ "If >=2, detect violation of One-Definition-Rule (ODR); "
+ "If ==1, detect ODR-violation only if the two variables "
+ "have different sizes");
}
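
The new ParseFlag signature attaches a human-readable description to every flag so that help=1 can print them all. A toy version of the idea follows; the deliberately naive name=value scanner and all identifiers are mine and only meant to show the registration side effect:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <string>
    #include <vector>

    struct FlagDoc { const char *name; const char *description; };
    static std::vector<FlagDoc> registered_flags;

    // Parse "name=value" out of an option string such as "redzone=64:help=1",
    // recording the description as a side effect.
    static void ParseIntFlag(const char *options, int *out, const char *name,
                             const char *description) {
      registered_flags.push_back({name, description});
      std::string needle = std::string(name) + "=";
      if (const char *p = strstr(options, needle.c_str()))
        *out = atoi(p + needle.size());
    }

    static void PrintFlagDescriptions() {
      printf("Available flags:\n");
      for (const FlagDoc &f : registered_flags)
        printf("\t%s\n\t\t- %s\n", f.name, f.description);
    }

    int main() {
      const char *options = "redzone=64:help=1";
      int redzone = 16, help = 0;
      ParseIntFlag(options, &redzone, "redzone",
                   "Minimal size (in bytes) of redzones around heap objects.");
      ParseIntFlag(options, &help, "help", "Print flag descriptions.");
      if (help) PrintFlagDescriptions();
      printf("redzone=%d\n", redzone);
      return 0;
    }
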
void InitializeFlags(Flags *f, const char *env) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
+ cf->detect_leaks = CAN_SANITIZE_LEAKS;
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->malloc_context_size = kDefaultMallocContextSize;
+ cf->intercept_tls_get_addr = true;
+ cf->coverage = false;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->redzone = 16;
+ f->max_redzone = 2048;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = false;
@@ -148,53 +248,55 @@ void InitializeFlags(Flags *f, const char *env) {
f->replace_intrin = true;
f->mac_ignore_invalid_free = false;
f->detect_stack_use_after_return = false; // Also needs the compiler flag.
- f->uar_stack_size_log = 0;
+ f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
+ f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
+ f->uar_noreserve = false;
f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
f->malloc_fill_byte = 0xbe;
f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
f->allow_user_poisoning = true;
f->sleep_before_dying = 0;
- f->handle_segv = ASAN_NEEDS_SEGV;
- f->allow_user_segv_handler = false;
- f->use_sigaltstack = false;
f->check_malloc_usable_size = true;
f->unmap_shadow_on_exit = false;
f->abort_on_error = false;
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
- f->coverage = false;
f->disable_core = (SANITIZER_WORDSIZE == 64);
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=131
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
f->strict_memcmp = true;
f->strict_init_order = false;
+ f->start_deactivated = false;
+ f->detect_invalid_pointer_pairs = 0;
+ f->detect_container_overflow = true;
// Override from compile definition.
- ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton());
+ ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
- if (cf->verbosity) {
- Report("Using the defaults from __asan_default_options: %s\n",
- MaybeCallAsanDefaultOptions());
- }
+ VReport(1, "Using the defaults from __asan_default_options: %s\n",
+ MaybeCallAsanDefaultOptions());
// Override from command line.
ParseFlagsFromString(f, env);
+ if (common_flags()->help) {
+ PrintFlagDescriptions();
+ }
-#if !CAN_SANITIZE_LEAKS
- if (cf->detect_leaks) {
+ if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
cf->detect_leaks = false;
}
-#endif
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
@@ -203,6 +305,17 @@ void InitializeFlags(Flags *f, const char *env) {
}
}
+// Parse flags that may change between startup and activation.
+// On Android they come from a system property.
+// On other platforms this is no-op.
+void ParseExtraActivationFlags() {
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ ParseFlagsFromString(flags(), buf);
+ if (buf[0] != '\0')
+ VReport(1, "Extra activation flags: %s\n", buf);
+}
+
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
@@ -224,6 +337,7 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
@@ -269,6 +383,53 @@ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)
+#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \
+ void __asan_##type##size(uptr addr) { \
+ uptr sp = MEM_TO_SHADOW(addr); \
+ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
+ if (UNLIKELY(s)) { \
+ if (UNLIKELY(size >= SHADOW_GRANULARITY || \
+ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ (s8)s)) { \
+ if (__asan_test_only_reported_buggy_pointer) { \
+ *__asan_test_only_reported_buggy_pointer = addr; \
+ } else { \
+ GET_CALLER_PC_BP_SP; \
+ __asan_report_error(pc, bp, sp, addr, is_write, size); \
+ } \
+ } \
+ } \
+ }
+
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ __asan_report_error(pc, bp, sp, addr, false, size);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ __asan_report_error(pc, bp, sp, addr, true, size);
+ }
+}
+
// Force the linker to keep the symbols for various ASan interface functions.
// We want to keep those in the executable in order to let the instrumented
// dynamic libraries access the symbol even if it is not used by the executable
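
The branch condition in ASAN_MEMORY_ACCESS_CALLBACK packs the whole shadow check into one comparison; spelled out separately (an illustrative restatement, not the runtime code):

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kShadowGranularity = 8;

    // One shadow byte covers 8 application bytes: 0 means all 8 are
    // addressable, a value k in 1..7 means only the first k are, and negative
    // (magic) values mean the whole granule is poisoned.
    bool AccessIsPoisoned(uintptr_t addr, uintptr_t size, int8_t shadow) {
      if (shadow == 0) return false;            // fully addressable granule
      if (size >= kShadowGranularity) return true;
      int8_t last_accessed = static_cast<int8_t>(
          (addr & (kShadowGranularity - 1)) + size - 1);
      return last_accessed >= shadow;           // runs past the valid prefix
    }

    int main() {
      // Shadow value 4: only bytes 0..3 of the granule are addressable.
      printf("%d\n", AccessIsPoisoned(0x1000, 4, 4));   // 0: touches bytes 0..3
      printf("%d\n", AccessIsPoisoned(0x1002, 4, 4));   // 1: touches bytes 2..5
      printf("%d\n", AccessIsPoisoned(0x1000, 1, -5));  // 1: poisoned redzone
      return 0;
    }
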
@@ -372,7 +533,8 @@ static void PrintAddressSpaceLayout() {
(void*)MEM_TO_SHADOW(kMidShadowEnd));
}
Printf("\n");
- Printf("red_zone=%zu\n", (uptr)flags()->redzone);
+ Printf("redzone=%zu\n", (uptr)flags()->redzone);
+ Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -387,59 +549,17 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
-} // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
-
-int NOINLINE __asan_set_error_exit_code(int exit_code) {
- int old = flags()->exitcode;
- flags()->exitcode = exit_code;
- return old;
-}
-
-void NOINLINE __asan_handle_no_return() {
- int local_stack;
- AsanThread *curr_thread = GetCurrentThread();
- CHECK(curr_thread);
- uptr PageSize = GetPageSizeCached();
- uptr top = curr_thread->stack_top();
- uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
- static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
- if (top - bottom > kMaxExpectedCleanupSize) {
- static bool reported_warning = false;
- if (reported_warning)
- return;
- reported_warning = true;
- Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
- "stack top: %p; bottom %p; size: %p (%zd)\n"
- "False positive error reports may follow\n"
- "For details see "
- "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
- top, bottom, top - bottom, top - bottom);
- return;
- }
- PoisonShadow(bottom, top - bottom, 0);
- if (curr_thread->has_fake_stack())
- curr_thread->fake_stack()->HandleNoReturn();
-}
-
-void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
- death_callback = callback;
-}
-
-void __asan_init() {
- if (asan_inited) return;
+static void AsanInitInternal() {
+ if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true;
+
+ // Initialize flags. This must be done early, because most of the
+ // initialization steps look at flags().
+ const char *options = GetEnv("ASAN_OPTIONS");
+ InitializeFlags(flags(), options);
+
InitializeHighMemEnd();
// Make sure we are not statically linked.
@@ -450,18 +570,21 @@ void __asan_init() {
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
- // Initialize flags. This must be done early, because most of the
- // initialization steps look at flags().
- const char *options = GetEnv("ASAN_OPTIONS");
- InitializeFlags(flags(), options);
+ if (!flags()->start_deactivated)
+ ParseExtraActivationFlags();
+
__sanitizer_set_report_path(common_flags()->log_path);
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
- if (common_flags()->verbosity && options) {
- Report("Parsed ASAN_OPTIONS: %s\n", options);
+ if (options) {
+ VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}
+ if (flags()->start_deactivated)
+ AsanStartDeactivated();
+
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -471,7 +594,6 @@ void __asan_init() {
InitializeAsanInterceptors();
ReplaceSystemMalloc();
- ReplaceOperatorsNewAndDelete();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
@@ -501,6 +623,7 @@ void __asan_init() {
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
@@ -523,18 +646,13 @@ void __asan_init() {
}
AsanTSDInit(PlatformTSDDtor);
- InstallSignalHandlers();
+ InstallDeadlySignalHandlers(AsanOnSIGSEGV);
// Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator.
InitializeAllocator();
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
+ Symbolizer::Init(common_flags()->external_symbolizer_path);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
@@ -544,8 +662,10 @@ void __asan_init() {
if (flags()->atexit)
Atexit(asan_atexit);
- if (flags()->coverage)
+ if (common_flags()->coverage) {
+ __sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
+ }
// interceptors
InitTlsSize();
@@ -559,6 +679,7 @@ void __asan_init() {
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
force_interface_symbols(); // no-op.
+ SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
@@ -567,7 +688,84 @@ void __asan_init() {
}
#endif // CAN_SANITIZE_LEAKS
- if (common_flags()->verbosity) {
- Report("AddressSanitizer Init done\n");
+ VReport(1, "AddressSanitizer Init done\n");
+}
+
+// Initialize as requested from some part of ASan runtime library (interceptors,
+// allocator, etc).
+void AsanInitFromRtl() {
+ AsanInitInternal();
+}
+
+#if ASAN_DYNAMIC
+// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
+// (and thus normal initializer from .preinit_array haven't run).
+
+class AsanInitializer {
+public: // NOLINT
+ AsanInitializer() {
+ AsanCheckIncompatibleRT();
+ AsanCheckDynamicRTPrereqs();
+ if (UNLIKELY(!asan_inited))
+ __asan_init();
}
+};
+
+static AsanInitializer asan_initializer;
+#endif // ASAN_DYNAMIC
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
+
+int NOINLINE __asan_set_error_exit_code(int exit_code) {
+ int old = flags()->exitcode;
+ flags()->exitcode = exit_code;
+ return old;
+}
+
+void NOINLINE __asan_handle_no_return() {
+ int local_stack;
+ AsanThread *curr_thread = GetCurrentThread();
+ CHECK(curr_thread);
+ uptr PageSize = GetPageSizeCached();
+ uptr top = curr_thread->stack_top();
+ uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
+ top, bottom, top - bottom, top - bottom);
+ return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+ if (curr_thread->has_fake_stack())
+ curr_thread->fake_stack()->HandleNoReturn();
+}
+
+void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
+ death_callback = callback;
+}
+
+// Initialize as requested from instrumented application code.
+// We use this call as a trigger to wake up ASan from deactivated state.
+void __asan_init() {
+ AsanCheckIncompatibleRT();
+ AsanActivate();
+ AsanInitInternal();
}
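
__asan_init is now only a thin wrapper, and for ASAN_DYNAMIC builds a static initializer object kicks it off when the runtime is merely LD_PRELOAD-ed into an uninstrumented binary. The trick in miniature (a generic sketch, not ASan's actual initializer):

    #include <cstdio>

    static bool runtime_inited = false;

    static void RuntimeInit() {
      if (runtime_inited) return;
      runtime_inited = true;
      // ... set up shadow memory, interceptors, flags ...
    }

    namespace {
    struct RuntimeInitializer {
      RuntimeInitializer() { RuntimeInit(); }  // runs when the object is constructed
    };
    RuntimeInitializer initializer;  // constructed at load time, before main
    }

    int main() {
      printf("initialized before main: %d\n", runtime_inited);
      return 0;
    }
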
diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc
index 24cccbd196e3..96178e8247b6 100644
--- a/libsanitizer/asan/asan_stack.cc
+++ b/libsanitizer/asan/asan_stack.cc
@@ -10,40 +10,10 @@
// Code for ASan stack trace.
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#include "asan_flags.h"
#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_flags.h"
-
-namespace __asan {
-
-static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
- int out_size) {
- return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
- : false;
-}
-
-void PrintStack(const uptr *trace, uptr size) {
- StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
-}
-
-void PrintStack(StackTrace *stack) {
- PrintStack(stack->trace, stack->size);
-}
-
-} // namespace __asan
// ------------------ Interface -------------- {{{1
-// Provide default implementation of __asan_symbolize that does nothing
-// and may be overriden by user if he wants to use his own symbolization.
-// ASan on Windows has its own implementation of this.
-#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
- return false;
-}
-#endif
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index df7a9805f92f..d31c0afa83bc 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -19,49 +19,62 @@
namespace __asan {
-void PrintStack(StackTrace *stack);
-void PrintStack(const uptr *trace, uptr size);
-
-} // namespace __asan
-
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
+ uptr bp, void *context, bool fast) {
#if SANITIZER_WINDOWS
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- stack.Unwind(max_s, pc, bp, 0, 0, fast)
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- { \
- AsanThread *t; \
- stack.size = 0; \
- if (asan_inited) { \
- if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
- uptr stack_top = t->stack_top(); \
- uptr stack_bottom = t->stack_bottom(); \
- ScopedUnwinding unwind_scope(t); \
- stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
- } else if (t == 0 && !fast) { \
- /* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
- stack.Unwind(max_s, pc, bp, 0, 0, false); \
- } \
- } \
+ AsanThread *t;
+ stack->size = 0;
+ if (LIKELY(asan_inited)) {
+ if ((t = GetCurrentThread()) && !t->isUnwinding()) {
+ uptr stack_top = t->stack_top();
+ uptr stack_bottom = t->stack_bottom();
+ ScopedUnwinding unwind_scope(t);
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ } else if (t == 0 && !fast) {
+      /* If GetCurrentThread() has failed, try to do slow unwind anyway. */
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+ }
}
#endif // SANITIZER_WINDOWS
+}
+
+} // namespace __asan
// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE(max_size, fast) \
- GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
+#define GET_STACK_TRACE(max_size, fast) \
+ StackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ GetStackTraceWithPcBpAndContext(&stack, max_size, \
+ StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), 0, fast); \
+ }
-#define GET_STACK_TRACE_FATAL(pc, bp) \
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
- common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ StackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
+ common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
+ StackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
+ common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@@ -78,7 +91,7 @@ void PrintStack(const uptr *trace, uptr size);
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE_FATAL_HERE; \
- PrintStack(&stack); \
+ stack.Print(); \
}
#endif // ASAN_STACK_H
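The reworked GET_STACK_TRACE above avoids unwinding entirely when at most two frames are requested, storing the current pc and the caller's pc directly. A standalone sketch of the same fast-path idea, using GCC builtins in place of GetCurrentPc()/GET_CALLER_PC() and a stubbed slow_unwind() in place of StackTrace::Unwind(); it assumes -fno-omit-frame-pointer for the second frame and is not part of the patch:

#include <stddef.h>
#include <stdint.h>

struct SimpleTrace {
  static const size_t kMax = 64;
  uintptr_t trace[kMax];
  size_t size;
};

// Stand-in for the real frame-pointer/DWARF unwinder used for deeper traces.
static void slow_unwind(SimpleTrace *t, size_t max_depth) {
  (void)max_depth;
  t->size = 0;  // the runtime would walk stack frames here
}

__attribute__((always_inline))
inline void capture(SimpleTrace *t, size_t max_depth) {
  if (max_depth <= 2) {
    // Fast path: no unwinding, just this frame's and the caller's return pc.
    t->size = 0;
    if (max_depth > 0)
      t->trace[t->size++] =
          reinterpret_cast<uintptr_t>(__builtin_return_address(0));
    if (max_depth > 1)
      t->trace[t->size++] =
          reinterpret_cast<uintptr_t>(__builtin_return_address(1));
  } else {
    slow_unwind(t, max_depth);  // full unwind, as in the deep-trace case above
  }
}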
diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc
index 5a9c2dddffb8..df85858a67bf 100644
--- a/libsanitizer/asan/asan_thread.cc
+++ b/libsanitizer/asan/asan_thread.cc
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
@@ -76,7 +77,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
- AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
+ AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
@@ -85,28 +86,27 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
- if (common_flags()->verbosity >= 1)
- Report("T%d TSDDtor\n", context->tid);
+ VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
- if (common_flags()->verbosity >= 1) {
- Report("T%d exited\n", tid());
- }
+ int tid = this->tid();
+ VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack();
- if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
- asanThreadRegistry().FinishThread(tid());
+ if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
+ asanThreadRegistry().FinishThread(tid);
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
- DeleteFakeStack();
+ DeleteFakeStack(tid);
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
+ DTLS_Destroy();
}
// We want to create the FakeStack lazily on the first use, but not earlier
@@ -121,13 +121,16 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
- // if that was successfull, it initilizes the pointer.
+ // if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
- if (flags()->uar_stack_size_log)
- stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+ stack_size_log =
+ Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
+ stack_size_log =
+ Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
@@ -140,12 +143,10 @@ void AsanThread::Init() {
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
- if (common_flags()->verbosity >= 1) {
- int local = 0;
- Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
- tid(), (void*)stack_bottom_, (void*)stack_top_,
- stack_top_ - stack_bottom_, &local);
- }
+ int local = 0;
+ VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+ (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+ &local);
fake_stack_ = 0; // Will be initialized lazily if needed.
AsanPlatformThreadInit();
}
@@ -153,7 +154,7 @@ void AsanThread::Init() {
thread_return_t AsanThread::ThreadStart(uptr os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
@@ -265,10 +266,8 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
- if (common_flags()->verbosity >= 2) {
- Report("SetCurrentThread: %p for thread %p\n",
- t->context(), (void*)GetThreadSelf());
- }
+ VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ (void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
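The lazy FakeStack construction above relies on a 0 -> 1 -> pointer state machine driven by a strong compare-and-swap, so only one thread allocates while concurrent callers observe either "in progress" or the final pointer. A minimal standalone sketch of that pattern with std::atomic; Resource and LazyInit are illustrative names, not part of the runtime:

#include <atomic>
#include <stdint.h>

struct Resource { int value; };

// 0 = not created, 1 = being created, any other value = pointer to the object.
static std::atomic<uintptr_t> g_state(0);

Resource *LazyInit() {
  uintptr_t old_val = 0;
  // Only the thread that wins the 0 -> 1 transition performs the allocation.
  if (g_state.compare_exchange_strong(old_val, 1, std::memory_order_relaxed)) {
    Resource *r = new Resource();
    g_state.store(reinterpret_cast<uintptr_t>(r), std::memory_order_release);
    return r;
  }
  uintptr_t cur = g_state.load(std::memory_order_acquire);
  // Still being created by another thread (state 1) => report "not ready yet".
  return cur > 1 ? reinterpret_cast<Resource *>(cur) : nullptr;
}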
diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h
index 5a917fa9a3da..33242efabaa2 100644
--- a/libsanitizer/asan/asan_thread.h
+++ b/libsanitizer/asan/asan_thread.h
@@ -15,7 +15,6 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
-#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -76,12 +75,12 @@ class AsanThread {
return addr >= stack_bottom_ && addr < stack_top_;
}
- void DeleteFakeStack() {
+ void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
- t->Destroy();
+ t->Destroy(tid);
}
bool has_fake_stack() {
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 8ffa58faa371..03d45e3839ba 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -33,11 +33,6 @@ extern "C" {
namespace __asan {
-// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
-static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
-static bool dbghelp_initialized = false;
-#pragma comment(lib, "dbghelp.lib")
-
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
@@ -73,17 +68,9 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
-void SetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckDynamicRTPrereqs() { UNIMPLEMENTED(); }
-void UnsetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
-
-void InstallSignalHandlers() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckIncompatibleRT() {}
void AsanPlatformThreadInit() {
// Nothing here for now.
@@ -93,54 +80,10 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
-} // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
- BlockingMutexLock lock(&dbghelp_lock);
- if (!dbghelp_initialized) {
- SymSetOptions(SYMOPT_DEFERRED_LOADS |
- SYMOPT_UNDNAME |
- SYMOPT_LOAD_LINES);
- CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
- // FIXME: We don't call SymCleanup() on exit yet - should we?
- dbghelp_initialized = true;
- }
-
- // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
- char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
- PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
- symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
- symbol->MaxNameLen = MAX_SYM_NAME;
- DWORD64 offset = 0;
- BOOL got_objname = SymFromAddr(GetCurrentProcess(),
- (DWORD64)addr, &offset, symbol);
- if (!got_objname)
- return false;
-
- DWORD unused;
- IMAGEHLP_LINE64 info;
- info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
- BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
- (DWORD64)addr, &unused, &info);
- int written = 0;
- out_buffer[0] = '\0';
- // FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
- if (got_fileline) {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s %s:%d", symbol->Name,
- info.FileName, info.LineNumber);
- } else {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s+0x%p", symbol->Name, offset);
- }
- return true;
+void AsanOnSIGSEGV(int, void *siginfo, void *context) {
+ UNIMPLEMENTED();
}
-} // extern "C"
+} // namespace __asan
#endif // _WIN32
diff --git a/libsanitizer/include/sanitizer/asan_interface.h b/libsanitizer/include/sanitizer/asan_interface.h
index 0016339e4864..bf4c47895362 100644
--- a/libsanitizer/include/sanitizer/asan_interface.h
+++ b/libsanitizer/include/sanitizer/asan_interface.h
@@ -48,9 +48,10 @@ extern "C" {
((void)(addr), (void)(size))
#endif
- // Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
+ // Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
// address will result in error report from AddressSanitizer).
- bool __asan_address_is_poisoned(void const volatile *addr);
+ // Otherwise returns 0.
+ int __asan_address_is_poisoned(void const volatile *addr);
  // If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
@@ -63,7 +64,7 @@ extern "C" {
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
void __asan_report_error(void *pc, void *bp, void *sp,
- void *addr, bool is_write, size_t access_size);
+ void *addr, int is_write, size_t access_size);
// Sets the exit code to use when reporting an error.
// Returns the old value.
@@ -80,22 +81,14 @@ extern "C" {
// the program crashes before ASan report is printed.
void __asan_on_error();
- // User may provide its own implementation for symbolization function.
- // It should print the description of instruction at address "pc" to
- // "out_buffer". Description should be at most "out_size" bytes long.
- // User-specified function should return true if symbolization was
- // successful.
- bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
// Returns the estimated number of bytes that will be reserved by allocator
// for request of "size" bytes. If ASan allocator can't allocate that much
// memory, returns the maximal possible allocation size, otherwise returns
// "size".
size_t __asan_get_estimated_allocated_size(size_t size);
- // Returns true if p was returned by the ASan allocator and
- // is not yet freed.
- bool __asan_get_ownership(const void *p);
+ // Returns 1 if p was returned by the ASan allocator and is not yet freed.
+ // Otherwise returns 0.
+ int __asan_get_ownership(const void *p);
// Returns the number of bytes reserved for the pointer p.
// Requires (get_ownership(p) == true) or (p == 0).
size_t __asan_get_allocated_size(const void *p);
@@ -128,6 +121,24 @@ extern "C" {
// deallocation of "ptr".
void __asan_malloc_hook(void *ptr, size_t size);
void __asan_free_hook(void *ptr);
+
+  // The following two functions facilitate garbage collection in the
+  // presence of ASan's fake stack.
+
+  // Returns an opaque handle to be used later in __asan_addr_is_in_fake_stack.
+ // Returns NULL if the current thread does not have a fake stack.
+ void *__asan_get_current_fake_stack();
+
+ // If fake_stack is non-NULL and addr belongs to a fake frame in
+ // fake_stack, returns the address on real stack that corresponds to
+ // the fake frame and sets beg/end to the boundaries of this fake frame.
+ // Otherwise returns NULL and does not touch beg/end.
+ // If beg/end are NULL, they are not touched.
+ // This function may be called from a thread other than the owner of
+  // fake_stack, but the owner thread needs to be alive.
+ void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+
#ifdef __cplusplus
} // extern "C"
#endif
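The two fake-stack hooks declared above are aimed at conservative garbage collectors. A short sketch of how a collector might resolve a candidate root through them, assuming the program is built with -fsanitize=address; ResolveStackPointer is an illustrative name:

#include <sanitizer/asan_interface.h>
#include <stdio.h>

// Map a pointer that may point into ASan's fake stack back to the real
// stack slot before treating it as a GC root.
void *ResolveStackPointer(void *candidate) {
  void *fake_stack = __asan_get_current_fake_stack();
  if (!fake_stack)
    return candidate;  // the current thread has no fake stack
  void *beg = nullptr, *end = nullptr;
  void *real = __asan_addr_is_in_fake_stack(fake_stack, candidate, &beg, &end);
  if (real) {
    printf("fake frame [%p,%p) corresponds to real slot %p\n", beg, end, real);
    return real;
  }
  return candidate;  // not a fake-stack address
}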
diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h
index db8b3b543e3d..44870a039b4f 100644
--- a/libsanitizer/include/sanitizer/common_interface_defs.h
+++ b/libsanitizer/include/sanitizer/common_interface_defs.h
@@ -22,13 +22,28 @@
#ifdef __cplusplus
extern "C" {
#endif
+ // Arguments for __sanitizer_sandbox_on_notify() below.
+ typedef struct {
+ // Enable sandbox support in sanitizer coverage.
+ int coverage_sandboxed;
+ // File descriptor to write coverage data to. If -1 is passed, a file will
+  // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
+ // effect if coverage_sandboxed == 0.
+ intptr_t coverage_fd;
+ // If non-zero, split the coverage data into well-formed blocks. This is
+ // useful when coverage_fd is a socket descriptor. Each block will contain
+ // a header, allowing data from multiple processes to be sent over the same
+ // socket.
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
- void __sanitizer_sandbox_on_notify(void *reserved);
+ void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@@ -45,6 +60,8 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
+ // Initialize coverage.
+ void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
@@ -54,7 +71,7 @@ extern "C" {
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
- // end <= mid <= end. For example, in "std::vector<> v"
+ // beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
@@ -82,6 +99,14 @@ extern "C" {
const void *end,
const void *old_mid,
const void *new_mid);
+  // Returns true if the contiguous container [beg, end) is properly poisoned
+ // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
+ // - [beg, mid) is addressable,
+ // - [mid, end) is unaddressable.
+ // Full verification requires O(end-beg) time; this function tries to avoid
+ // such complexity by touching only parts of the container around beg/mid/end.
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
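The container annotation entry points above can be driven from user code that manages its own capacity. A minimal sketch for a fixed-capacity buffer, assuming an ASan-enabled build; SmallBuf is illustrative, not part of the interface:

#include <sanitizer/common_interface_defs.h>
#include <stddef.h>

struct SmallBuf {
  char storage[64];
  size_t used;

  SmallBuf() : used(0) {
    // Initially nothing is live: mark the whole capacity as unaddressable.
    __sanitizer_annotate_contiguous_container(
        storage, storage + sizeof(storage), storage + sizeof(storage), storage);
  }
  void resize(size_t new_used) {
    // old_mid = end of previously live data, new_mid = end of live data now.
    __sanitizer_annotate_contiguous_container(
        storage, storage + sizeof(storage), storage + used, storage + new_used);
    used = new_used;
  }
  bool well_formed() const {
    return __sanitizer_verify_contiguous_container(
               storage, storage + used, storage + sizeof(storage)) != 0;
  }
};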
diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h
index dd938483d184..c1b160205a74 100644
--- a/libsanitizer/include/sanitizer/dfsan_interface.h
+++ b/libsanitizer/include/sanitizer/dfsan_interface.h
@@ -37,6 +37,9 @@ struct dfsan_label_info {
void *userdata;
};
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
@@ -72,6 +75,14 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
+/// Returns the number of labels allocated.
+size_t dfsan_get_label_count(void);
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+
#ifdef __cplusplus
} // extern "C"
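A hedged usage sketch for the new write callback, assuming a DFSan build (-fsanitize=dataflow); dfsan_read_label comes from the full header rather than from this hunk:

#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

static void OnWrite(int fd, const void *buf, size_t count) {
  // Runs before the write() is performed; the write may still fail later.
  dfsan_label label = dfsan_read_label(buf, count);
  if (label != 0)
    fprintf(stderr, "labeled data (label %u) written to fd %d, %zu bytes\n",
            (unsigned)label, fd, count);
}

void InstallWriteCallback() {
  dfsan_set_write_callback(OnWrite);
}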
diff --git a/libsanitizer/include/sanitizer/lsan_interface.h b/libsanitizer/include/sanitizer/lsan_interface.h
index ec9c730eee30..95e79245ec04 100644
--- a/libsanitizer/include/sanitizer/lsan_interface.h
+++ b/libsanitizer/include/sanitizer/lsan_interface.h
@@ -21,13 +21,24 @@ extern "C" {
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
+
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
- // The user may optionally provide this function to disallow leak checking
- // for the program it is linked into (if the return value is non-zero). This
- // function must be defined as returning a constant value; any behavior beyond
- // that is unsupported.
- int __lsan_is_turned_off();
+
+ // Memory regions registered through this interface will be treated as sources
+ // of live pointers during leak checking. Useful if you store pointers in
+ // mapped memory.
+ // Points of note:
+ // - __lsan_unregister_root_region() must be called with the same pointer and
+ // size that have earlier been passed to __lsan_register_root_region()
+ // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+ // if you map memory within a larger region that you have mprotect'ed, you can
+ // register the entire large region.
+ // - the implementation is not optimized for performance. This interface is
+ // intended to be used for a small number of relatively static regions.
+ void __lsan_register_root_region(const void *p, size_t size);
+ void __lsan_unregister_root_region(const void *p, size_t size);
+
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
@@ -35,6 +46,16 @@ extern "C" {
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
+
+ // The user may optionally provide this function to disallow leak checking
+ // for the program it is linked into (if the return value is non-zero). This
+ // function must be defined as returning a constant value; any behavior beyond
+ // that is unsupported.
+ int __lsan_is_turned_off();
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan suppressions.
+ const char *__lsan_default_suppressions();
#ifdef __cplusplus
} // extern "C"
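A sketch of the new root-region registration, assuming LeakSanitizer is enabled (standalone or as part of ASan); error handling is trimmed for brevity:

#include <sanitizer/lsan_interface.h>
#include <sys/mman.h>
#include <stdlib.h>

void RootRegionExample() {
  const size_t kSize = 4096;
  void *mem = mmap(0, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return;
  void **region = (void **)mem;
  // Pointers stored here would otherwise look like leaks to LSan.
  __lsan_register_root_region(region, kSize);
  region[0] = malloc(128);

  // Later: unregister with the same pointer and size, then clean up.
  __lsan_unregister_root_region(region, kSize);
  free(region[0]);
  munmap(region, kSize);
}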
diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h
index f531cf347c9b..68e510f19bdf 100644
--- a/libsanitizer/include/sanitizer/msan_interface.h
+++ b/libsanitizer/include/sanitizer/msan_interface.h
@@ -17,13 +17,10 @@
#ifdef __cplusplus
extern "C" {
#endif
-
-#if __has_feature(memory_sanitizer)
/* Returns a string describing a stack origin.
Return NULL if the origin is invalid, or is not a stack origin. */
const char *__msan_get_origin_descr_if_stack(uint32_t id);
-
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
@@ -39,6 +36,10 @@ extern "C" {
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
+ /* Make a null-terminated string fully initialized (without changing its
+ contents). */
+ void __msan_unpoison_string(const volatile char *a);
+
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
@@ -51,6 +52,10 @@ extern "C" {
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+ /* Checks that memory range is fully initialized, and reports an error if it
+ * is not. */
+ void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
+
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
@@ -67,13 +72,13 @@ extern "C" {
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
- /* Print shadow and origin for the memory range to stdout in a human-readable
+ /* Print shadow and origin for the memory range to stderr in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
- /* Print current function arguments shadow and origin to stdout in a
+ /* Print shadow for the memory range to stderr in a minimalistic
human-readable format. */
- void __msan_print_param_shadow();
+ void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
@@ -86,6 +91,9 @@ extern "C" {
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
+ // Sets the callback to be called right before death on error.
+ // Passing 0 will unset the callback.
+ void __msan_set_death_callback(void (*callback)(void));
/***********************************/
/* Allocator statistics interface. */
@@ -132,27 +140,6 @@ extern "C" {
deallocation of "ptr". */
void __msan_malloc_hook(const volatile void *ptr, size_t size);
void __msan_free_hook(const volatile void *ptr);
-
-#else // __has_feature(memory_sanitizer)
-
-#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
-#define __msan_set_origin(a, size, origin)
-#define __msan_get_origin(a) ((uint32_t)-1)
-#define __msan_get_track_origins() (0)
-#define __msan_get_umr_origin() ((uint32_t)-1)
-#define __msan_unpoison(a, size)
-#define __msan_poison(a, size)
-#define __msan_partial_poison(data, shadow, size)
-#define __msan_test_shadow(x, size) ((intptr_t)-1)
-#define __msan_set_exit_code(exit_code)
-#define __msan_set_expect_umr(expect_umr)
-#define __msan_print_shadow(x, size)
-#define __msan_print_param_shadow()
-#define __msan_has_dynamic_component() (0)
-#define __msan_allocated_memory(data, size)
-
-#endif // __has_feature(memory_sanitizer)
-
#ifdef __cplusplus
} // extern "C"
#endif
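A short sketch of the two MSan additions above, assuming a Clang build with -fsanitize=memory; CheckBeforeHandoff is an illustrative name:

#include <sanitizer/msan_interface.h>
#include <stddef.h>

void CheckBeforeHandoff(const char *name, const void *blob, size_t len) {
  // Report right away if any byte of 'blob' is uninitialized, instead of
  // waiting for a later use of the copied-out data.
  __msan_check_mem_is_initialized(blob, len);

  // A C string produced by uninstrumented code can be declared initialized
  // up to and including its terminating NUL.
  __msan_unpoison_string(name);
}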
diff --git a/libsanitizer/tsan/tsan_interface_atomic.h b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
index c500614acc4c..d19c9109416b 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.h
+++ b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
@@ -7,14 +7,11 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
+// Public interface header for TSan atomics.
//===----------------------------------------------------------------------===//
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
-#ifndef INTERFACE_ATTRIBUTE
-# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -23,14 +20,12 @@ typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
-
#if defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
+# define __TSAN_HAS_INT128 1
#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
+# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
@@ -45,159 +40,181 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
-void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
-#undef INTERFACE_ATTRIBUTE
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+#endif // TSAN_INTERFACE_ATOMIC_H
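The declarations above are normally called by compiler-generated instrumentation, but they can also be invoked directly. A sketch assuming a TSan build; the __tsan_memory_order_* enumerator names come from the elided enum body and are assumed here:

#include <sanitizer/tsan_interface_atomic.h>

static __tsan_atomic32 g_counter;

int BumpAndRead() {
  __tsan_atomic32_fetch_add(&g_counter, 1, __tsan_memory_order_acq_rel);
  return __tsan_atomic32_load(&g_counter, __tsan_memory_order_acquire);
}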
diff --git a/libsanitizer/interception/interception.h b/libsanitizer/interception/interception.h
index 7393f4c26be2..51505e1baa2e 100644
--- a/libsanitizer/interception/interception.h
+++ b/libsanitizer/interception/interception.h
@@ -13,7 +13,8 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "Interception doesn't work on this operating system."
#endif
@@ -233,11 +234,11 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB
-#if defined(__linux__)
+#if defined(__linux__) || defined(__FreeBSD__)
# include "interception_linux.h"
-# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
- INTERCEPT_FUNCTION_VER_LINUX(func, symver)
+ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
diff --git a/libsanitizer/interception/interception_linux.cc b/libsanitizer/interception/interception_linux.cc
index 0a8df474ab49..0a8305b0bf82 100644
--- a/libsanitizer/interception/interception_linux.cc
+++ b/libsanitizer/interception/interception_linux.cc
@@ -10,10 +10,10 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#include "interception.h"
-#include <dlfcn.h> // for dlsym
+#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
@@ -31,4 +31,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) {
} // namespace __interception
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
diff --git a/libsanitizer/interception/interception_linux.h b/libsanitizer/interception/interception_linux.h
index 5ab24db438ee..e2cc4c19dffe 100644
--- a/libsanitizer/interception/interception_linux.h
+++ b/libsanitizer/interception/interception_linux.h
@@ -10,7 +10,7 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
@@ -26,20 +26,20 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
-#define INTERCEPT_FUNCTION_LINUX(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func), \
- (::__interception::uptr)&(func), \
- (::__interception::uptr)&WRAP(func))
+#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
+ ::__interception::GetRealFunctionAddress( \
+ #func, (::__interception::uptr *)&__interception::PTR_TO_REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
+# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, symver)
#else
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
- INTERCEPT_FUNCTION_LINUX(func)
+# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
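Outside the runtime, the same dlsym(RTLD_NEXT, ...) resolution that GetRealFunctionAddress performs can be sketched as a plain LD_PRELOAD interposer; this is illustrative only, and a production interceptor must also handle allocations made by dlsym itself:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for RTLD_NEXT
#endif
#include <dlfcn.h>
#include <stddef.h>

static void *(*real_malloc)(size_t) = 0;
static size_t intercepted_calls = 0;

extern "C" void *malloc(size_t size) {
  if (!real_malloc)
    real_malloc =
        reinterpret_cast<void *(*)(size_t)>(dlsym(RTLD_NEXT, "malloc"));
  ++intercepted_calls;  // keep I/O out of here: printing may re-enter malloc
  return real_malloc(size);
}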
diff --git a/libsanitizer/interception/interception_type_test.cc b/libsanitizer/interception/interception_type_test.cc
index f664eeeb04fe..6ba45e26ddac 100644
--- a/libsanitizer/interception/interception_type_test.cc
+++ b/libsanitizer/interception/interception_type_test.cc
@@ -17,13 +17,13 @@
#include <stddef.h>
#include <stdint.h>
-COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t));
-COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t));
-COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t));
-COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t));
+COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
+COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
+COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
+COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__
-COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
+COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
@@ -31,7 +31,7 @@ COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
-COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t));
+COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif
diff --git a/libsanitizer/interception/interception_win.cc b/libsanitizer/interception/interception_win.cc
index 443bdce1859a..9cd717518391 100644
--- a/libsanitizer/interception/interception_win.cc
+++ b/libsanitizer/interception/interception_win.cc
@@ -54,91 +54,136 @@ static void WriteJumpInstruction(char *jmp_from, char *to) {
*(ptrdiff_t*)(jmp_from + 1) = offset;
}
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
-#ifdef _WIN64
-# error OverrideFunction was not tested on x64
-#endif
- // Basic idea:
- // We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
- // to override it. We want to be able to execute the original 'old_func' from
- // the wrapper, so we need to keep the leading 5+ bytes ('head') of the
- // original instructions somewhere with a "jmp old_func+head".
- // We call these 'head'+5 bytes of instructions a "trampoline".
-
+static char *GetMemoryForTrampoline(size_t size) {
// Trampolines are allocated from a common pool.
const int POOL_SIZE = 1024;
static char *pool = NULL;
static size_t pool_used = 0;
- if (pool == NULL) {
- pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
- MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- // FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
- if (pool == NULL)
- return false;
+ if (!pool) {
+ pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
+ // interceptors are in place.
+ if (!pool)
+ return NULL;
_memset(pool, 0xCC /* int 3 */, POOL_SIZE);
}
- char* old_bytes = (char*)old_func;
- char* trampoline = pool + pool_used;
+ if (pool_used + size > POOL_SIZE)
+ return NULL;
- // Find out the number of bytes of the instructions we need to copy to the
- // island and store it in 'head'.
- size_t head = 0;
- while (head < 5) {
- switch (old_bytes[head]) {
+ char *ret = pool + pool_used;
+ pool_used += size;
+ return ret;
+}
+
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, char *code) {
+ size_t cursor = 0;
+ while (cursor < size) {
+ switch (code[cursor]) {
+ case '\x51': // push ecx
+ case '\x52': // push edx
+ case '\x53': // push ebx
+ case '\x54': // push esp
case '\x55': // push ebp
case '\x56': // push esi
case '\x57': // push edi
- head++;
+ case '\x5D': // pop ebp
+ cursor++;
+ continue;
+ case '\x6A': // 6A XX = push XX
+ cursor += 2;
+ continue;
+ case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
+ cursor += 5;
continue;
}
- switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
+ switch (*(unsigned short*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
- head += 2;
+ cursor += 2;
continue;
+ case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
+ case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
- head += 3;
+ cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
- head += 6;
+ cursor += 6;
+ continue;
+ case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
+ cursor += 7;
continue;
}
- switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
+ switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov eal, dword ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
+ case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
- head += 4;
+ cursor += 4;
continue;
}
// Unknown instruction!
- return false;
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // or __debugbreak() leads to an infinite recursion in CheckFailed.
+ // Do we have a good way to abort with an error message here?
+ return 0;
}
- if (pool_used + head + 5 > POOL_SIZE)
- return false;
+ return cursor;
+}
+
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
+#ifdef _WIN64
+#error OverrideFunction is not yet supported on x64
+#endif
+ // Function overriding works basically like this:
+ // We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
+ // to override it.
+ // We might want to be able to execute the original 'old_func' from the
+  // wrapper; in that case we need to keep the leading 5+ bytes ('head')
+ // of the original code somewhere with a "jmp <old_func+head>".
+ // We call these 'head'+5 bytes of instructions a "trampoline".
+ char *old_bytes = (char *)old_func;
+
+ // We'll need at least 5 bytes for a 'jmp'.
+ size_t head = 5;
+ if (orig_old_func) {
+ // Find out the number of bytes of the instructions we need to copy
+ // to the trampoline and store it in 'head'.
+ head = RoundUpToInstrBoundary(head, old_bytes);
+ if (!head)
+ return false;
+
+ // Put the needed instructions into the trampoline bytes.
+ char *trampoline = GetMemoryForTrampoline(head + 5);
+ if (!trampoline)
+ return false;
+ _memcpy(trampoline, old_bytes, head);
+ WriteJumpInstruction(trampoline + head, old_bytes + head);
+ *orig_old_func = (uptr)trampoline;
+ }
- // Now put the "jump to trampoline" instruction into the original code.
+ // Now put the "jmp <new_func>" instruction at the original code location.
+ // We should preserve the EXECUTE flag as some of our own code might be
+ // located in the same page (sic!). FIXME: might consider putting the
+ // __interception code into a separate section or something?
DWORD old_prot, unused_prot;
- if (!VirtualProtect((void*)old_func, head, PAGE_EXECUTE_READWRITE,
+ if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
&old_prot))
return false;
- // Put the needed instructions into the trampoline bytes.
- _memcpy(trampoline, old_bytes, head);
- WriteJumpInstruction(trampoline + head, old_bytes + head);
- *orig_old_func = (uptr)trampoline;
- pool_used += head + 5;
-
- // Intercept the 'old_func'.
- WriteJumpInstruction(old_bytes, (char*)new_func);
+ WriteJumpInstruction(old_bytes, (char *)new_func);
_memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
- if (!VirtualProtect((void*)old_func, head, old_prot, &unused_prot))
+ // Restore the original permissions.
+ if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
return false; // not clear if this failure bothers us.
return true;
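The patching above ultimately reduces to writing a 5-byte relative jump (0xE9 plus rel32) at the function entry. A sketch of just the encoding step on 32-bit x86, leaving out the VirtualProtect and trampoline bookkeeping:

#include <stdint.h>
#include <string.h>

// Fill a 5-byte buffer with "jmp rel32" from 'from' to 'to' (x86-32 only).
void EncodeJmpRel32(unsigned char out[5], uintptr_t from, uintptr_t to) {
  out[0] = 0xE9;  // opcode for a near relative jump
  int32_t rel = (int32_t)(to - (from + 5));  // displacement from the next insn
  memcpy(out + 1, &rel, sizeof(rel));
}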
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 270979a78e7e..c1481f5fb971 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -33,6 +33,11 @@ static void InitializeCommonFlags() {
ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
}
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
+}
+
} // namespace __lsan
using namespace __lsan; // NOLINT
@@ -53,12 +58,7 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
+ Symbolizer::Init(common_flags()->external_symbolizer_path);
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index ce47dfcd215a..3e81ebef2a7c 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -141,7 +141,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
- if (m->allocated && addr < chunk + m->requested_size)
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index bbc5b5f0378a..78afa7706d6b 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@@ -24,7 +25,8 @@
#if CAN_SANITIZE_LEAKS
namespace __lsan {
-// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@@ -39,42 +41,56 @@ static void InitializeFlags() {
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
+ f->print_suppressions = true;
f->suppressions="";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
+ f->use_root_regions = true;
f->use_unaligned = false;
- f->verbosity = 0;
+ f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
- ParseFlag(options, &f->use_registers, "use_registers");
- ParseFlag(options, &f->use_globals, "use_globals");
- ParseFlag(options, &f->use_stacks, "use_stacks");
- ParseFlag(options, &f->use_tls, "use_tls");
- ParseFlag(options, &f->use_unaligned, "use_unaligned");
- ParseFlag(options, &f->report_objects, "report_objects");
- ParseFlag(options, &f->resolution, "resolution");
+ ParseFlag(options, &f->use_registers, "use_registers", "");
+ ParseFlag(options, &f->use_globals, "use_globals", "");
+ ParseFlag(options, &f->use_stacks, "use_stacks", "");
+ ParseFlag(options, &f->use_tls, "use_tls", "");
+ ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
+ ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
+ ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
+ ParseFlag(options, &f->report_objects, "report_objects", "");
+ ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
- ParseFlag(options, &f->max_leaks, "max_leaks");
+ ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
- ParseFlag(options, &f->verbosity, "verbosity");
- ParseFlag(options, &f->log_pointers, "log_pointers");
- ParseFlag(options, &f->log_threads, "log_threads");
- ParseFlag(options, &f->exitcode, "exitcode");
- ParseFlag(options, &f->suppressions, "suppressions");
+ ParseFlag(options, &f->log_pointers, "log_pointers", "");
+ ParseFlag(options, &f->log_threads, "log_threads", "");
+ ParseFlag(options, &f->exitcode, "exitcode", "");
+ ParseFlag(options, &f->print_suppressions, "print_suppressions", "");
+ ParseFlag(options, &f->suppressions, "suppressions", "");
}
}
+#define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) Report(__VA_ARGS__); \
+ } while (0);
+
+#define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) Report(__VA_ARGS__); \
+ } while (0);
+
SuppressionContext *suppression_ctx;
void InitializeSuppressions() {
CHECK(!suppression_ctx);
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- suppression_ctx = new(placeholder_) SuppressionContext;
+ ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
+ suppression_ctx = new(placeholder) SuppressionContext;
char *suppressions_from_file;
uptr buffer_size;
if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
@@ -89,8 +105,22 @@ void InitializeSuppressions() {
suppression_ctx->Parse(__lsan_default_suppressions());
}
+struct RootRegion {
+ const void *begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> *root_regions;
+
+void InitializeRootRegions() {
+ CHECK(!root_regions);
+ ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+ root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+}
+
void InitCommonLsan() {
InitializeFlags();
+ InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@@ -130,8 +160,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
const uptr alignment = flags()->pointer_alignment();
- if (flags()->log_pointers)
- Report("Scanning %s range %p-%p.\n", region_type, begin, end);
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
@@ -146,10 +175,19 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ continue;
+ }
+
m.set_tag(tag);
- if (flags()->log_pointers)
- Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+ chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@@ -168,7 +206,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
- if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
+ LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
@@ -176,8 +214,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
- if (flags()->log_threads)
- Report("Thread %d not found in registry.\n", os_id);
+ LOG_THREADS("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
@@ -194,14 +231,12 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
- if (flags()->log_threads)
- Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
- if (flags()->log_threads)
- Report("WARNING: stack pointer not in stack range.\n");
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@@ -212,7 +247,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
@@ -230,6 +265,37 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
}
+static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
+ uptr root_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr begin, end, prot;
+ while (proc_maps.Next(&begin, &end,
+ /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
+ &prot)) {
+ uptr intersection_begin = Max(root_begin, begin);
+ uptr intersection_end = Min(end, root_end);
+ if (intersection_begin >= intersection_end) continue;
+ bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_begin, root_end, begin, end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier,
+ "ROOT", kReachable);
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions) return;
+ CHECK(root_regions);
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ uptr begin_addr = reinterpret_cast<uptr>(region.begin);
+ ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ }
+}
+
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
@@ -264,30 +330,27 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
- Frontier frontier(GetPageSizeCached());
+ Frontier frontier(1);
- if (flags()->use_globals)
- ProcessGlobalRegions(&frontier);
+ ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
+ ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
- if (flags()->log_pointers)
- Report("Processing platform-specific allocations.\n");
+ LOG_POINTERS("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
- if (flags()->log_pointers)
- Report("Scanning ignored chunks.\n");
+ LOG_POINTERS("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
- if (flags()->log_pointers)
- Report("Scanning leaked chunks.\n");
+ LOG_POINTERS("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
@@ -298,7 +361,8 @@ static void PrintStackTraceById(u32 stack_trace_id) {
StackTrace::PrintStack(trace, size);
}
-// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
@@ -307,26 +371,17 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
+ u32 stack_trace_id = 0;
if (resolution > 0) {
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
- leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
+ stack_trace_id = StackDepotPut(trace, size);
} else {
- leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+ stack_trace_id = m.stack_trace_id();
}
- }
-}
-
-// ForEachChunkCallback. Prints addresses of unreachable chunks.
-static void PrintLeakedCb(uptr chunk, void *arg) {
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- Printf("%s leaked %zu byte object at %p.\n",
- m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
- m.requested_size(), chunk);
+ leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+ m.tag());
}
}
@@ -345,12 +400,6 @@ static void PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
-static void PrintLeaked() {
- Printf("\n");
- Printf("Reporting individual objects:\n");
- ForEachChunk(PrintLeakedCb, 0 /* arg */);
-}
-
struct DoLeakCheckParam {
bool success;
LeakReport leak_report;
@@ -361,11 +410,8 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
CHECK(param);
CHECK(!param->success);
- CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
- if (!param->leak_report.IsEmpty() && flags()->report_objects)
- PrintLeaked();
param->success = true;
}
@@ -376,7 +422,7 @@ void DoLeakCheck() {
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return;
+ return;
DoLeakCheckParam param;
param.success = false;
@@ -390,8 +436,9 @@ void DoLeakCheck() {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
- uptr have_unsuppressed = param.leak_report.ApplySuppressions();
- if (have_unsuppressed) {
+ param.leak_report.ApplySuppressions();
+ uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+ if (unsuppressed_count > 0) {
Decorator d;
Printf("\n"
"================================================================="
@@ -399,27 +446,37 @@ void DoLeakCheck() {
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
- param.leak_report.PrintLargest(flags()->max_leaks);
+ param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
- if (have_unsuppressed || (flags()->verbosity >= 1)) {
+ if (flags()->print_suppressions)
PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
+ if (flags()->exitcode)
+ internal__exit(flags()->exitcode);
}
- if (have_unsuppressed && flags()->exitcode)
- internal__exit(flags()->exitcode);
}
static Suppression *GetSuppressionForAddr(uptr addr) {
+ Suppression *s;
+
+ // Suppress by module name.
+ const char *module_name;
+ uptr module_offset;
+ if (Symbolizer::Get()->GetModuleNameAndOffsetForPC(addr, &module_name,
+ &module_offset) &&
+ suppression_ctx->Match(module_name, SuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
- Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
+ suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s))
return s;
}
return 0;
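
With the module-name fast path added above, a leak can now be suppressed by the module that allocated it, in addition to the function and source-file names of the symbolized frames. A sketch of a matching suppressions file, assuming the usual leak:<pattern> form and loading it via the suppressions flag (e.g. LSAN_OPTIONS=suppressions=lsan.supp); the patterns are placeholders:

  # matched against the module name (new fast path, no symbolization needed)
  leak:libfoo.so
  # matched against symbolized function and source file names
  leak:LeakyInit
  leak:third_party/leaky.cc
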
@@ -439,26 +496,35 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
-// in LeakReport::Add(). We don't expect to ever see this many leaks in
-// real-world applications.
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+ uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
- for (uptr i = 0; i < leaks_.size(); i++)
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
- return;
+ break;
}
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered) return;
+ Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false };
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
@@ -468,7 +534,7 @@ static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
return leak1.is_directly_leaked;
}
-void LeakReport::PrintLargest(uptr num_leaks_to_print) {
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
@@ -476,31 +542,49 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
"reported.\n",
kMaxLeaksConsidered);
- uptr unsuppressed_count = 0;
- for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) unsuppressed_count++;
- if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
- Printf("The %zu largest leak(s):\n", num_leaks_to_print);
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
- uptr leaks_printed = 0;
- Decorator d;
+ uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
- Printf("%s", d.Leak());
- Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
- leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
- leaks_[i].total_size, leaks_[i].hit_count);
- Printf("%s", d.End());
- PrintStackTraceById(leaks_[i].stack_trace_id);
- leaks_printed++;
- if (leaks_printed == num_leaks_to_print) break;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report) break;
}
- if (leaks_printed < unsuppressed_count) {
- uptr remaining = unsuppressed_count - leaks_printed;
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.End());
+
+ PrintStackTraceById(leaks_[index].stack_trace_id);
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
@@ -516,20 +600,24 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
-uptr LeakReport::ApplySuppressions() {
- uptr unsuppressed_count = 0;
+void LeakReport::ApplySuppressions() {
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count;
leaks_[i].is_suppressed = true;
- } else {
- unsuppressed_count++;
}
}
- return unsuppressed_count;
}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed) result++;
+ return result;
+}
+
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
@@ -545,13 +633,51 @@ void __lsan_ignore_object(const void *p) {
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
- if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): no heap object found at %p", p);
- if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): "
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
- if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
- Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ RootRegion region = {begin, size};
+ root_regions->push_back(region);
+ VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ if (region.begin == begin && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions->size() - 1;
+ (*root_regions)[i] = (*root_regions)[last_index];
+ root_regions->pop_back();
+ VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %llu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
#endif // CAN_SANITIZE_LEAKS
}
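
ProcessRootRegions() earlier in this file consumes the regions registered through the two interface functions above, treating their readable parts as part of the root set. A minimal usage sketch, assuming the declarations are exported via <sanitizer/lsan_interface.h> as part of this merge; 'arena', kSize and the 128-byte allocation are illustrative:

  #include <sanitizer/lsan_interface.h>
  #include <sys/mman.h>
  #include <cstdlib>

  int main() {
    // Anonymous mappings are not scanned by default (they are neither
    // globals, stacks, TLS nor registers), so a pointer stashed here would
    // otherwise make its target look leaked.
    const size_t kSize = 1 << 20;
    void *arena = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    *reinterpret_cast<void **>(arena) = malloc(128);

    // The readable intersection of [arena, arena + kSize) now belongs to
    // the root set, so the 128 bytes stay reachable.
    __lsan_register_root_region(arena, kSize);

    // Unregistering requires the exact begin/size pair that was registered;
    // a mismatch is fatal, as the code above shows.
    // __lsan_unregister_root_region(arena, kSize);
    return 0;
  }
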
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 5d9b4eb62e1a..0c84d414be2e 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -49,6 +49,8 @@ struct Flags {
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
+ // Print matched suppressions after leak checking.
+ bool print_suppressions;
// Suppressions file name.
const char* suppressions;
@@ -61,12 +63,13 @@ struct Flags {
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
+ // Regions added via __lsan_register_root_region().
+ bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
-
- // User-visible verbosity.
- int verbosity;
+ // Consider pointers found in poisoned memory to be valid.
+ bool use_poisoned;
// Debug logging.
bool log_pointers;
@@ -77,6 +80,7 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
+ u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
@@ -84,17 +88,31 @@ struct Leak {
bool is_suppressed;
};
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
- LeakReport() : leaks_(1) {}
- void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
- void PrintLargest(uptr max_leaks);
+ LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+ void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+ ChunkTag tag);
+ void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- bool IsEmpty() { return leaks_.size() == 0; }
- uptr ApplySuppressions();
+ void ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+
+
private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_;
InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
@@ -119,6 +137,15 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate one word for the array size (0) and store a pointer
+// to the end of the allocated chunk.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
@@ -127,6 +154,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
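
IsSpecialCaseOfOperatorNew0() above relies on a layout detail worth spelling out: under the Itanium C++ ABI, array-new of a type with a destructor stores the element count in a cookie in front of the array, so new T[0] returns a pointer one word past the chunk start, i.e. at its very end. A hedged illustration (how the parent tool feeds this helper from its pointer-into-chunk check is not shown here):

  struct T {
    ~T() {}  // any type with a user-declared destructor needs a cookie
  };

  int main() {
    T *p = new T[0];
    // Chunk layout on a 64-bit Itanium-ABI target:
    //   chunk_beg:                one uptr holding the element count (0)
    //   chunk_beg + sizeof(uptr): the value returned in p (== chunk end)
    // Without the special case, p would not count as pointing into the
    // chunk, and the zero-length array could be reported as leaked.
    delete[] p;
    return 0;
  }
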
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 80d2459a9ad1..c318fbc4c3b4 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -41,11 +42,11 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
- Report("LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
else if (num_matches > 1)
- Report("LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
@@ -81,6 +82,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
@@ -127,6 +129,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 1940902ef837..dfaad325672e 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -32,19 +32,19 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define GET_STACK_TRACE \
- StackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- bool fast = common_flags()->fast_unwind_on_malloc; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
- StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
+#define GET_STACK_TRACE \
+ StackTrace stack; \
+ { \
+ uptr stack_top = 0, stack_bottom = 0; \
+ ThreadContext *t; \
+ bool fast = common_flags()->fast_unwind_on_malloc; \
+ if (fast && (t = CurrentThreadContext())) { \
+ stack_top = t->stack_end(); \
+ stack_bottom = t->stack_begin(); \
+ } \
+ stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
+ StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
+ stack_top, stack_bottom, fast); \
}
#define ENSURE_LSAN_INITED do { \
@@ -150,7 +150,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
-INTERCEPTOR(void, cfree, void *p) ALIAS("free");
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
@@ -171,9 +171,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
@@ -183,7 +183,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
-INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
+INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
+ ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
@@ -236,7 +237,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
pthread_attr_init(&myattr);
attr = &myattr;
}
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index 8e9038dc8cff..089c6936e2ab 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -22,6 +22,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
@@ -30,20 +32,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index e9fd115e9761..1ea631590c09 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -65,19 +65,23 @@ LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage.lo \
- sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
- sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_deadlock_detector1.lo \
+ sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
+ sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
+ sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \
- sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
- sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
- sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
+ sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
+ sanitizer_posix_libcdep.lo sanitizer_printf.lo \
+ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
+ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
+ sanitizer_stacktrace_libcdep.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
- sanitizer_suppressions.lo \
- sanitizer_symbolizer_posix_libcdep.lo \
- sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
+ sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
- sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
- sanitizer_win.lo
+ sanitizer_symbolizer_libcdep.lo \
+ sanitizer_symbolizer_posix_libcdep.lo \
+ sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
+ sanitizer_tls_get_addr.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
@@ -253,6 +257,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
@@ -261,20 +267,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@@ -374,6 +383,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@@ -385,6 +396,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@@ -396,6 +409,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:
diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
new file mode 100644
index 000000000000..9e15d51c66c1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
@@ -0,0 +1,340 @@
+//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Concurrent uptr->T hashmap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ADDRHASHMAP_H
+#define SANITIZER_ADDRHASHMAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+// Concurrent uptr->T hashmap.
+// T must be a POD type, kSize is preferably a prime but can be any number.
+// Usage example:
+//
+// typedef AddrHashMap<uptr, 11> Map;
+// Map m;
+// {
+// Map::Handle h(&m, addr);
+// use h.operator->() to access the data
+// if h.created() then the element was just created, and the current thread
+// has exclusive access to it
+// otherwise the current thread has only read access to the data
+// }
+// {
+// Map::Handle h(&m, addr, true);
+// this will remove the data from the map in Handle dtor
+// the current thread has exclusive access to the data
+// if !h.exists() then the element never existed
+// }
+template<typename T, uptr kSize>
+class AddrHashMap {
+ private:
+ struct Cell {
+ atomic_uintptr_t addr;
+ T val;
+ };
+
+ struct AddBucket {
+ uptr cap;
+ uptr size;
+ Cell cells[1]; // variable len
+ };
+
+ static const uptr kBucketSize = 3;
+
+ struct Bucket {
+ RWMutex mtx;
+ atomic_uintptr_t add;
+ Cell cells[kBucketSize];
+ };
+
+ public:
+ AddrHashMap();
+
+ class Handle {
+ public:
+ Handle(AddrHashMap<T, kSize> *map, uptr addr);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
+
+ ~Handle();
+ T *operator->();
+ bool created() const;
+ bool exists() const;
+
+ private:
+ friend AddrHashMap<T, kSize>;
+ AddrHashMap<T, kSize> *map_;
+ Bucket *bucket_;
+ Cell *cell_;
+ uptr addr_;
+ uptr addidx_;
+ bool created_;
+ bool remove_;
+ bool create_;
+ };
+
+ private:
+ friend class Handle;
+ Bucket *table_;
+
+ void acquire(Handle *h);
+ void release(Handle *h);
+ uptr calcHash(uptr addr);
+};
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = false;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove, bool create) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = create;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::~Handle() {
+ map_->release(this);
+}
+
+template <typename T, uptr kSize>
+T *AddrHashMap<T, kSize>::Handle::operator->() {
+ return &cell_->val;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::created() const {
+ return created_;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::exists() const {
+ return cell_ != 0;
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::AddrHashMap() {
+ table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+ uptr addr = h->addr_;
+ uptr hash = calcHash(addr);
+ Bucket *b = &table_[hash];
+
+ h->created_ = false;
+ h->addidx_ = -1U;
+ h->bucket_ = b;
+ h->cell_ = 0;
+
+ // If we want to remove the element, we need exclusive access to the bucket,
+ // so skip the lock-free phase.
+ if (h->remove_)
+ goto locked;
+
+ retry:
+ // First try to find an existing element w/o read mutex.
+ CHECK(!h->remove_);
+ // Check the embed cells.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 == addr) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Check the add cells with read lock.
+ if (atomic_load(&b->add, memory_order_relaxed)) {
+ b->mtx.ReadLock();
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ }
+ b->mtx.ReadUnlock();
+ }
+
+ locked:
+ // Re-check existence under write lock.
+ // Embed cells.
+ b->mtx.Lock();
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+
+ // Add cells.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (add) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+ }
+
+ // The element does not exist, no need to create it if we want to remove.
+ if (h->remove_ || !h->create_) {
+ b->mtx.Unlock();
+ return;
+ }
+
+ // Now try to create it under the mutex.
+ h->created_ = true;
+ // See if we have a free embed cell.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == 0) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Store in the add cells.
+ if (add == 0) {
+ // Allocate a new add array.
+ const uptr kInitSize = 64;
+ add = (AddBucket*)InternalAlloc(kInitSize);
+ internal_memset(add, 0, kInitSize);
+ add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add->size = 0;
+ atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+ }
+ if (add->size == add->cap) {
+ // Grow existing add array.
+ uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+ uptr newsize = oldsize * 2;
+ AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
+ internal_memset(add1, 0, newsize);
+ add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add1->size = add->size;
+ internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+ InternalFree(add);
+ atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+ add = add1;
+ }
+ // Store.
+ uptr i = add->size++;
+ Cell *c = &add->cells[i];
+ CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
+ h->addidx_ = i;
+ h->cell_ = c;
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) {
+ if (h->cell_ == 0)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+}
+
+template<typename T, uptr kSize>
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
+ addr += addr << 10;
+ addr ^= addr >> 6;
+ return addr % kSize;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ADDRHASHMAP_H
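
A minimal sketch of the Handle protocol described in the comment at the top of this new header; AllocInfo, AllocMap, the 10007 bucket count and the two functions are illustrative:

  #include "sanitizer_addrhashmap.h"

  namespace __sanitizer {

  struct AllocInfo {
    uptr first_pc;
  };

  // kSize is preferably a prime; 10007 is an arbitrary choice.
  typedef AddrHashMap<AllocInfo, 10007> AllocMap;
  static AllocMap *alloc_map;  // constructed once at startup

  void RecordAlloc(uptr addr, uptr pc) {
    AllocMap::Handle h(alloc_map, addr);
    if (h.created())
      h->first_pc = pc;  // just inserted: this thread has exclusive access
    // otherwise only read access to *h is guaranteed, e.g.:
    // uptr pc0 = h->first_pc;
  }

  void RecordFree(uptr addr) {
    AllocMap::Handle h(alloc_map, addr, /*remove*/ true);
    // If h.exists(), this thread has exclusive access and the entry is
    // dropped from the map when h goes out of scope.
  }

  }  // namespace __sanitizer
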
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index 9d38e9429ac6..f4e3af1b06af 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -17,7 +17,7 @@
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO)
+#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
@@ -115,7 +115,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
+ (char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
index efdb89e36825..74e4402f7366 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
@@ -22,14 +22,13 @@ namespace __sanitizer {
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
+static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
#if SANITIZER_WORDSIZE == 32
-static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
-static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
@@ -46,10 +45,10 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
// LargeMmapAllocator.
struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
+ RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!\n");
}
void OnUnmap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
+ RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!\n");
}
};
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic.h b/libsanitizer/sanitizer_common/sanitizer_atomic.h
index f2bf23588a44..6387664f63b7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic.h
@@ -42,7 +42,8 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
- volatile Type val_dont_use;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ volatile ALIGNED(8) Type val_dont_use;
};
struct atomic_uintptr_t {
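
The ALIGNED(8) added above only changes anything on 32-bit targets, where a plain u64 member may end up 4-byte aligned while the 64-bit atomic code paths (cmpxchg8b / movq) need natural alignment. A rough standalone equivalent using standard alignas in place of the internal ALIGNED macro; the struct names are illustrative:

  #include <cstdint>

  struct u64_plain   { volatile uint64_t v; };             // may be 4-aligned on ILP32
  struct u64_aligned { alignas(8) volatile uint64_t v; };  // what ALIGNED(8) enforces

  static_assert(alignof(u64_aligned) >= 8,
                "the 64-bit atomic paths rely on 8-byte alignment");
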
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
index 88819e32a737..c600999e67a8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
@@ -13,8 +13,26 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
namespace __sanitizer {
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+// (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+// error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
@@ -23,59 +41,6 @@ INLINE void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
-INLINE void proc_yield(int cnt) {
- __asm__ __volatile__("" ::: "memory");
-#if defined(__i386__) || defined(__x86_64__)
- for (int i = 0; i < cnt; i++)
- __asm__ __volatile__("pause");
-#endif
- __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-INLINE typename T::Type atomic_load(
- const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- typename T::Type v;
- // FIXME:
- // 64-bit atomic operations are not atomic on 32-bit platforms.
- // The implementation lacks necessary memory fences on ARM/PPC.
- // We would like to use compiler builtin atomic operations,
- // but they are mostly broken:
- // - they lead to vastly inefficient code generation
- // (http://llvm.org/bugs/show_bug.cgi?id=17281)
- // - 64-bit atomic operations are not implemented on x86_32
- // (http://llvm.org/bugs/show_bug.cgi?id=15034)
- // - they are not implemented on ARM
- // error: undefined reference to '__atomic_load_4'
- if (mo == memory_order_relaxed) {
- v = a->val_dont_use;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- v = a->val_dont_use;
- atomic_signal_fence(memory_order_seq_cst);
- }
- return v;
-}
-
-template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- if (mo == memory_order_relaxed) {
- a->val_dont_use = v;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- a->val_dont_use = v;
- atomic_signal_fence(memory_order_seq_cst);
- }
- if (mo == memory_order_seq_cst)
- atomic_thread_fence(memory_order_seq_cst);
-}
-
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
new file mode 100644
index 000000000000..c66c0992e1b0
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -0,0 +1,95 @@
+//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
+#define SANITIZER_ATOMIC_CLANG_OTHER_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __sync_synchronize();
+ } else { // seq_cst
+ // E.g. on POWER we need a hw fence even before the store.
+ __sync_synchronize();
+ v = a->val_dont_use;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ // Gross, but simple and reliable.
+ // Assume that it is not in read-only memory.
+ v = __sync_fetch_and_add(
+ const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ // Gross, but simple and reliable.
+ typename T::Type cmp = a->val_dont_use;
+ typename T::Type cur;
+ for (;;) {
+ cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+ if (cmp == v)
+ break;
+ cmp = cur;
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
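
On 32-bit targets the fallback above reads a 64-bit value atomically with __sync_fetch_and_add(p, 0) and writes it with a compare-and-swap loop. A standalone sketch of the same two tricks on a plain uint64_t, assuming the compiler provides 64-bit __sync builtins for the target; load64/store64 are illustrative names:

  #include <stdint.h>

  static inline uint64_t load64(volatile uint64_t *p) {
    // Adding zero is a full read-modify-write, so the old value comes back
    // atomically even where a plain 64-bit load could tear.
    return __sync_fetch_and_add(p, 0);
  }

  static inline void store64(volatile uint64_t *p, uint64_t v) {
    uint64_t expected = *p;  // best-effort snapshot to seed the loop
    for (;;) {
      uint64_t observed = __sync_val_compare_and_swap(p, expected, v);
      if (observed == expected) break;  // the swap took effect
      expected = observed;              // lost a race; retry with the new value
    }
  }
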
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
new file mode 100644
index 000000000000..5df210eca795
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -0,0 +1,114 @@
+//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_X86_H
+#define SANITIZER_ATOMIC_CLANG_X86_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ // On x86 loads are implicitly acquire.
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 plain MOV is enough for seq_cst store.
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;" // (ptr could be read-only)
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (v)
+ : "m" (a->val_dont_use)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;"
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (a->val_dont_use)
+ : "m" (v)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index dac7c19199b9..7e18fa38748a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -22,8 +22,20 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
+extern "C" short _InterlockedCompareExchange16( // NOLINT
+ short volatile *Destination, // NOLINT
+ short Exchange, short Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange16)
+extern "C"
+long long _InterlockedCompareExchange64( // NOLINT
+ long long volatile *Destination, // NOLINT
+ long long Exchange, long long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange64)
#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64( // NOLINT
+ long long volatile * Addend, long long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
@@ -106,6 +118,40 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
}
+INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+#endif
+}
+
+INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+}
+
+INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+#endif
+}
+
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
@@ -166,6 +212,45 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+ u16 *cmp,
+ u16 xchg,
+ memory_order mo) {
+ u16 cmpv = *cmp;
+ u16 prev = (u16)_InterlockedCompareExchange16(
+ (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+ u32 *cmp,
+ u32 xchg,
+ memory_order mo) {
+ u32 cmpv = *cmp;
+ u32 prev = (u32)_InterlockedCompareExchange(
+ (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+ u64 *cmp,
+ u64 xchg,
+ memory_order mo) {
+ u64 cmpv = *cmp;
+ u64 prev = (u64)_InterlockedCompareExchange64(
+ (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
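
The new 16-, 32- and 64-bit overloads above make atomic_compare_exchange_strong available for every atomic type on Windows. A hedged sketch of a typical CAS loop built on the 64-bit overload; atomic_fetch_or is an illustrative helper, not part of the patch, and the header's generic atomic_load is used only to seed the loop:

  INLINE u64 atomic_fetch_or(volatile atomic_uint64_t *a, u64 v,
                             memory_order mo) {
    u64 cmp = atomic_load(a, memory_order_relaxed);
    // On failure the overload writes the observed value back into cmp, so
    // the loop simply retries with the refreshed expectation.
    while (!atomic_compare_exchange_strong(a, &cmp, cmp | v, mo)) {
    }
    return cmp;
  }
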
diff --git a/libsanitizer/sanitizer_common/sanitizer_bitvector.h b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
new file mode 100644
index 000000000000..bb2872facded
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
@@ -0,0 +1,349 @@
+//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized BitVector implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BITVECTOR_H
+#define SANITIZER_BITVECTOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Fixed size bit vector based on a single basic integer.
+template <class basic_int_t = uptr>
+class BasicBitVector {
+ public:
+ enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
+
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() { bits_ = 0; }
+ void setAll() { bits_ = ~(basic_int_t)0; }
+ bool empty() const { return bits_ == 0; }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ |= mask(idx);
+ return bits_ != old;
+ }
+
+ // Returns true if the bit has changed from 1 to 0.
+ bool clearBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ &= ~mask(idx);
+ return bits_ != old;
+ }
+
+ bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
+
+ uptr getAndClearFirstOne() {
+ CHECK(!empty());
+ uptr idx = LeastSignificantSetBitIndex(bits_);
+ clearBit(idx);
+ return idx;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ |= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= ~v.bits_;
+ return bits_ != old;
+ }
+
+ void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const BasicBitVector &v) const {
+ return (bits_ & v.bits_) != 0;
+ }
+
+ // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
+ bool hasNext() const { return !bv_.empty(); }
+ uptr next() { return bv_.getAndClearFirstOne(); }
+ void clear() { bv_.clear(); }
+ private:
+ BasicBitVector bv_;
+ };
+
+ private:
+ basic_int_t mask(uptr idx) const {
+ CHECK_LT(idx, size());
+ return (basic_int_t)1UL << idx;
+ }
+ basic_int_t bits_;
+};
+
+// Fixed-size bit vector of (kLevel1Size * BV::kSize^2) bits.
+// The implementation is optimized for better performance on
+// sparse bit vectors, i.e. those with few set bits.
+template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
+class TwoLevelBitVector {
+  // This is essentially a 2-level bit vector.
+  // A set bit in the first-level BV indicates that there are set bits
+  // in the corresponding BV of the second level.
+  // This structure allows O(kLevel1Size) time for clear() and empty(),
+  // as well as fast handling of sparse BVs.
+ public:
+ enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
+ // No CTOR.
+
+ uptr size() const { return kSize; }
+
+ void clear() {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ l1_[i].clear();
+ }
+
+ void setAll() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ l1_[i0].setAll();
+ for (uptr i1 = 0; i1 < BV::kSize; i1++)
+ l2_[i0][i1].setAll();
+ }
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ if (!l1_[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ if (!l1_[i0].getBit(i1)) {
+ l1_[i0].setBit(i1);
+ l2_[i0][i1].clear();
+ }
+ bool res = l2_[i0][i1].setBit(i2);
+ // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
+ // idx, i0, i1, i2, res);
+ return res;
+ }
+
+ bool clearBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ bool res = false;
+ if (l1_[i0].getBit(i1)) {
+ res = l2_[i0][i1].clearBit(i2);
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ return res;
+ }
+
+ bool getBit(uptr idx) const {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
+ return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
+ }
+
+ uptr getAndClearFirstOne() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].empty()) continue;
+ uptr i1 = l1_[i0].getAndClearFirstOne();
+ uptr i2 = l2_[i0][i1].getAndClearFirstOne();
+ if (!l2_[i0][i1].empty())
+ l1_[i0].setBit(i1);
+ uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
+ // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
+ return res;
+ }
+ CHECK(0);
+ return 0;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = v.l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l1_[i0].setBit(i1))
+ l2_[i0][i1].clear();
+ if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].setIntersection(v.l1_[i0]))
+ res = true;
+ if (!l1_[i0].empty()) {
+ BV t = l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ return res;
+ }
+
+ void copyFrom(const TwoLevelBitVector &v) {
+ clear();
+ setUnion(v);
+ }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const TwoLevelBitVector &v) const {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (!v.l1_[i0].getBit(i1)) continue;
+ if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
+ it1_.clear();
+ it2_.clear();
+ }
+
+ bool hasNext() const {
+ if (it1_.hasNext()) return true;
+ for (uptr i = i0_; i < kLevel1Size; i++)
+ if (!bv_.l1_[i].empty()) return true;
+ return false;
+ }
+
+ uptr next() {
+ // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ if (!it1_.hasNext() && !it2_.hasNext()) {
+ for (; i0_ < kLevel1Size; i0_++) {
+ if (bv_.l1_[i0_].empty()) continue;
+ it1_ = typename BV::Iterator(bv_.l1_[i0_]);
+ // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ break;
+ }
+ }
+ if (!it2_.hasNext()) {
+ CHECK(it1_.hasNext());
+ i1_ = it1_.next();
+ it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
+ // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ }
+ CHECK(it2_.hasNext());
+ uptr i2 = it2_.next();
+ uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
+ // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
+ // it1_.hasNext(), it2_.hasNext(), kSize, res);
+ if (!it1_.hasNext() && !it2_.hasNext())
+ i0_++;
+ return res;
+ }
+
+ private:
+ const TwoLevelBitVector &bv_;
+ uptr i0_, i1_;
+ typename BV::Iterator it1_, it2_;
+ };
+
+ private:
+ void check(uptr idx) const { CHECK_LE(idx, size()); }
+
+ uptr idx0(uptr idx) const {
+ uptr res = idx / (BV::kSize * BV::kSize);
+ CHECK_LE(res, kLevel1Size);
+ return res;
+ }
+
+ uptr idx1(uptr idx) const {
+ uptr res = (idx / BV::kSize) % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ uptr idx2(uptr idx) const {
+ uptr res = idx % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ BV l1_[kLevel1Size];
+ BV l2_[kLevel1Size][BV::kSize];
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BITVECTOR_H
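For orientation, a minimal usage sketch of the new header (BitVectorExample is
hypothetical and not part of the patch; it assumes only sanitizer_bitvector.h):

#include "sanitizer_bitvector.h"
using namespace __sanitizer;

static void BitVectorExample() {
  TwoLevelBitVector<> bv;  // kSize bits: 4096 on 64-bit uptr, 1024 on 32-bit.
  bv.clear();              // There is no constructor; always clear() first.
  bv.setBit(5);
  bv.setBit(700);
  CHECK(bv.getBit(5));
  bv.clearBit(5);
  // Non-destructive iteration over the remaining set bits.
  for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
    uptr idx = it.next();  // Yields 700 here.
    (void)idx;
  }
}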
diff --git a/libsanitizer/sanitizer_common/sanitizer_bvgraph.h b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
new file mode 100644
index 000000000000..6ef0e81e044f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
@@ -0,0 +1,163 @@
+//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// BVGraph -- a directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BVGRAPH_H
+#define SANITIZER_BVGRAPH_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bitvector.h"
+
+namespace __sanitizer {
+
+// Directed graph of fixed size implemented as an array of bit vectors.
+// Not thread-safe, all accesses should be protected by an external lock.
+template<class BV>
+class BVGraph {
+ public:
+ enum SizeEnum { kSize = BV::kSize };
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() {
+ for (uptr i = 0; i < size(); i++)
+ v[i].clear();
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < size(); i++)
+ if (!v[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if a new edge was added.
+ bool addEdge(uptr from, uptr to) {
+ check(from, to);
+ return v[from].setBit(to);
+ }
+
+  // Adds an edge from every node set in 'from' to node 'to'. Records up to
+  // 'max_added_edges' of the newly added source nodes in 'added_edges' and
+  // returns the number of edges recorded there.
+ uptr addEdges(const BV &from, uptr to, uptr added_edges[],
+ uptr max_added_edges) {
+ uptr res = 0;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr node = t1.getAndClearFirstOne();
+ if (v[node].setBit(to))
+ if (res < max_added_edges)
+ added_edges[res++] = node;
+ }
+ return res;
+ }
+
+ // *EXPERIMENTAL*
+  // Returns true if an edge from=>to exists.
+ // This function does not use any global state except for 'this' itself,
+ // and thus can be called from different threads w/o locking.
+ // This would be racy.
+ // FIXME: investigate how much we can prove about this race being "benign".
+ bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
+
+ // Returns true if the edge from=>to was removed.
+ bool removeEdge(uptr from, uptr to) {
+ return v[from].clearBit(to);
+ }
+
+ // Returns true if at least one edge *=>to was removed.
+ bool removeEdgesTo(const BV &to) {
+    bool res = false;
+ for (uptr from = 0; from < size(); from++) {
+ if (v[from].setDifference(to))
+ res = true;
+ }
+ return res;
+ }
+
+ // Returns true if at least one edge from=>* was removed.
+ bool removeEdgesFrom(const BV &from) {
+ bool res = false;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr idx = t1.getAndClearFirstOne();
+ if (!v[idx].empty()) {
+ v[idx].clear();
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ void removeEdgesFrom(uptr from) {
+ return v[from].clear();
+ }
+
+ bool hasEdge(uptr from, uptr to) const {
+ check(from, to);
+ return v[from].getBit(to);
+ }
+
+ // Returns true if there is a path from the node 'from'
+ // to any of the nodes in 'targets'.
+ bool isReachable(uptr from, const BV &targets) {
+ BV &to_visit = t1,
+ &visited = t2;
+ to_visit.copyFrom(v[from]);
+ visited.clear();
+ visited.setBit(from);
+ while (!to_visit.empty()) {
+ uptr idx = to_visit.getAndClearFirstOne();
+ if (visited.setBit(idx))
+ to_visit.setUnion(v[idx]);
+ }
+ return targets.intersectsWith(visited);
+ }
+
+  // Finds a path from 'from' to one of the nodes in 'targets',
+  // stores up to 'path_size' items of the path into 'path',
+  // returns the path length, or 0 if no path of length <= 'path_size' exists.
+ uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
+ if (path_size == 0)
+ return 0;
+ path[0] = from;
+ if (targets.getBit(from))
+ return 1;
+    // The function is recursive, so we don't want to create a BV on the stack.
+ // Instead of a getAndClearFirstOne loop we use the slower iterator.
+ for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
+ uptr idx = it.next();
+ if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
+ return res + 1;
+ }
+ return 0;
+ }
+
+ // Same as findPath, but finds a shortest path.
+ uptr findShortestPath(uptr from, const BV &targets, uptr *path,
+ uptr path_size) {
+ for (uptr p = 1; p <= path_size; p++)
+ if (findPath(from, targets, path, p) == p)
+ return p;
+ return 0;
+ }
+
+ private:
+ void check(uptr idx1, uptr idx2) const {
+ CHECK_LT(idx1, size());
+ CHECK_LT(idx2, size());
+ }
+ BV v[kSize];
+  // Keep temporary vectors here; we cannot create large objects on the stack.
+ BV t1, t2;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BVGRAPH_H
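A rough sketch of how the graph is driven (GraphExample is hypothetical; the
intended in-tree client is presumably the runtime's deadlock detection code):

#include "sanitizer_bvgraph.h"
using namespace __sanitizer;

static void GraphExample() {
  // A tiny 8-node graph (BasicBitVector<u8>::kSize == 8); real clients use
  // TwoLevelBitVector-based graphs and keep them off the stack.
  static BVGraph<BasicBitVector<u8> > g;
  g.clear();
  g.addEdge(0, 1);
  g.addEdge(1, 2);
  BasicBitVector<u8> targets;
  targets.clear();
  targets.setBit(2);
  CHECK(g.isReachable(0, targets));                      // 0 -> 1 -> 2
  uptr path[8];
  CHECK_EQ(3, g.findShortestPath(0, targets, path, 8));  // path = {0, 1, 2}
}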
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 0d93527aa5e5..e76d4d558a1f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -91,7 +91,7 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
if (internal_iserror(openrv)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
- *buff = (char*)MmapOrDie(size, __FUNCTION__);
+ *buff = (char*)MmapOrDie(size, __func__);
*buff_size = size;
// Read up to one page at a time.
read_len = 0;
@@ -200,11 +200,11 @@ void ReportErrorSummary(const char *error_type, StackTrace *stack) {
return;
AddressInfo ai;
#if !SANITIZER_GO
- if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
+ if (stack->size > 0 && Symbolizer::Get()->CanReturnFileLineInfo()) {
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
+ Symbolizer::Get()->SymbolizePC(pc, &ai, 1);
}
#endif
ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
@@ -242,6 +242,30 @@ char *StripModuleName(const char *module) {
return internal_strdup(short_module_name);
}
+static atomic_uintptr_t g_total_mmaped;
+
+void IncreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ uptr total_mmaped =
+ atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
+ if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
+ // Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
+ uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
+ common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
+ RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
+ }
+}
+
+void DecreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
+}
+
+static void (*sandboxing_callback)();
+void SetSandboxingCallback(void (*f)()) {
+ sandboxing_callback = f;
+}
+
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
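The two accounting hooks are meant to bracket every tracked mapping; a minimal
hypothetical call site is sketched below (in the tree itself the calls are
expected to sit inside the platform MmapOrDie/UnmapOrDie implementations):

#include "sanitizer_common.h"
using namespace __sanitizer;

static void *MapCounted(uptr size) {
  void *p = MmapOrDie(size, "MapCounted");
  IncreaseTotalMmap(size);  // RAW_CHECKs once mmap_limit_mb is exceeded.
  return p;
}

static void UnmapCounted(void *p, uptr size) {
  UnmapOrDie(p, size);
  DecreaseTotalMmap(size);
}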
@@ -274,9 +298,11 @@ void __sanitizer_set_report_path(const char *path) {
}
}
-void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
- (void)reserved;
- PrepareForSandboxing();
+void NOINLINE
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+ PrepareForSandboxing(args);
+ if (sandboxing_callback)
+ sandboxing_callback();
}
void __sanitizer_report_error_summary(const char *error_summary) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 07d1b63db584..86171068f6e5 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -17,6 +17,7 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
+#include "sanitizer_flags.h"
namespace __sanitizer {
struct StackTrace;
@@ -25,14 +26,12 @@ struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
-#if defined(__powerpc__) || defined(__powerpc64__)
-const uptr kCacheLineSize = 128;
-#else
const uptr kCacheLineSize = 64;
-#endif
const uptr kMaxPathLength = 512;
+const uptr kMaxThreadStackSize = 1 << 30; // 1 GiB
+
extern const char *SanitizerToolName; // Can be changed by the tool.
uptr GetPageSize();
@@ -51,6 +50,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
@@ -58,6 +58,8 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
+void IncreaseTotalMmap(uptr size);
+void DecreaseTotalMmap(uptr size);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@@ -123,9 +125,18 @@ void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
+bool ColorizeReports();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
+#define VReport(level, ...) \
+ do { \
+ if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
+ } while (0)
+#define VPrintf(level, ...) \
+ do { \
+ if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
+ } while (0)
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
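The new VReport/VPrintf macros gate their output on common_flags()->verbosity;
a short hypothetical usage sketch:

#include "sanitizer_common.h"
using namespace __sanitizer;

static void VerbosityExample() {
  // Printed only when running with verbosity >= 1 (e.g. via tool options).
  VReport(1, "runtime initialized\n");
  // Needs verbosity >= 2 and goes through Printf() instead of Report().
  VPrintf(2, "  extra debugging detail\n");
}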
@@ -169,7 +180,10 @@ u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
-void PrepareForSandboxing();
+void AdjustStackSize(void *attr);
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void SetSandboxingCallback(void (*f)());
void InitTlsSize();
uptr GetTlsSize();
@@ -206,6 +220,14 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
+// Functions related to signal handling.
+typedef void (*SignalHandlerType)(int, void *, void *);
+bool IsDeadlySignal(int signum);
+void InstallDeadlySignalHandlers(SignalHandlerType handler);
+// Alternative signal stack (POSIX-only).
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
@@ -243,6 +265,19 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
+INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+ up = __builtin_ctzl(x);
+#elif defined(_WIN64)
+ _BitScanForward64(&up, x);
+#else
+ _BitScanForward(&up, x);
+#endif
+ return up;
+}
+
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
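A quick sanity example for the new helper next to its existing counterpart
(hypothetical, for illustration only):

static void BitIndexExample() {
  // 0x28 == 0b101000: the lowest set bit is bit 3, the highest is bit 5.
  CHECK_EQ(LeastSignificantSetBitIndex(0x28), 3);
  CHECK_EQ(MostSignificantSetBitIndex(0x28), 5);
}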
@@ -307,12 +342,6 @@ INLINE int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
-#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
-#else
-# define FIRST_32_SECOND_64(a, b) (a)
-#endif
-
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
@@ -477,6 +506,33 @@ const uptr kPthreadDestructorIterations = 0;
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
+
+#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO)
+extern uptr indirect_call_wrapper;
+void SetIndirectCallWrapper(uptr wrapper);
+
+template <typename F>
+F IndirectExternCall(F f) {
+ typedef F (*WrapF)(F);
+ return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f;
+}
+#else
+INLINE void SetIndirectCallWrapper(uptr wrapper) {}
+template <typename F>
+F IndirectExternCall(F f) {
+ return f;
+}
+#endif
+
+#if SANITIZER_ANDROID
+void AndroidLogWrite(const char *buffer);
+void GetExtraActivationFlags(char *buf, uptr size);
+void SanitizerInitializeUnwinder();
+#else
+INLINE void AndroidLogWrite(const char *buffer_unused) {}
+INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
+INLINE void SanitizerInitializeUnwinder() {}
+#endif
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
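IndirectExternCall() simply routes a function pointer through the registered
wrapper when one is set and is a plain direct call otherwise (it is used below
for the glob and scandir callbacks). A hypothetical caller:

#include "sanitizer_common.h"
using namespace __sanitizer;

typedef int (*user_cb_t)(int);

static int CallUserCallback(user_cb_t cb, int arg) {
  // Equivalent to cb(arg) when no wrapper has been registered.
  return IndirectExternCall(cb)(arg);
}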
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index e301dc17bd35..f758c1a6c526 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -26,7 +26,10 @@
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
+#include "sanitizer_addrhashmap.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_tls_get_addr.h"
#include <stdarg.h>
@@ -35,7 +38,11 @@
#endif // _WIN32
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
-#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, p, size) {}
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}
#endif
#ifndef COMMON_INTERCEPTOR_FD_ACCESS
@@ -58,14 +65,70 @@
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
+#ifndef COMMON_INTERCEPTOR_FILE_OPEN
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_CLOSE
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
+#endif
+
+struct FileMetadata {
+ // For open_memstream().
+ char **addr;
+ SIZE_T *size;
+};
+
+struct CommonInterceptorMetadata {
+ enum {
+ CIMT_INVALID = 0,
+ CIMT_FILE
+ } type;
+ union {
+ FileMetadata file;
+ };
+};
+
+typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;
+
+static MetadataHashMap *interceptor_metadata_map;
+
+#if SI_NOT_WINDOWS
+UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
+ const FileMetadata &file) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
+ CHECK(h.created());
+ h->type = CommonInterceptorMetadata::CIMT_FILE;
+ h->file = file;
+}
+
+UNUSED static const FileMetadata *GetInterceptorMetadata(
+ __sanitizer_FILE *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
+ /* remove */ false,
+ /* create */ false);
+ if (h.exists()) {
+ CHECK(!h.created());
+ CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
+ return &h->file;
+ } else {
+ return 0;
+ }
+}
+
+UNUSED static void DeleteInterceptorMetadata(void *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
+ CHECK(h.exists());
+}
+#endif // SI_NOT_WINDOWS
+
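These helpers give the interceptors a per-FILE side table keyed by address. A
rough sketch of the intended open_memstream()/fclose() flow (hypothetical
functions, not the actual interceptor bodies; __sanitizer_FILE and SIZE_T come
from the surrounding interceptor environment):

static void OnOpenMemstream(__sanitizer_FILE *f, char **buf, SIZE_T *size) {
  FileMetadata file = {buf, size};
  SetInterceptorMetadata(f, file);  // Remember where libc stores buf/size.
}

static void OnFileClose(__sanitizer_FILE *f) {
  if (const FileMetadata *m = GetInterceptorMetadata(f)) {
    // ... unpoison *m->addr and *m->size here ...
    DeleteInterceptorMetadata(f);
  }
}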
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
char* domain = REAL(textdomain)(domainname);
if (domain) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, domain,
- REAL(strlen)(domain) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
}
return domain;
}
@@ -160,6 +223,34 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
#define INIT_STRNCASECMP
#endif
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+ void *res = REAL(memchr)(s, c, n);
+ uptr len = res ? (char*)res - (char*)s + 1 : n;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
+ return res;
+}
+
+#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)
+#else
+#define INIT_MEMCHR
+#endif
+
+#if SANITIZER_INTERCEPT_MEMRCHR
+INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);
+ return REAL(memrchr)(s, c, n);
+}
+
+#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)
+#else
+#define INIT_MEMRCHR
+#endif
+
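The length passed to COMMON_INTERCEPTOR_READ_RANGE in the memchr interceptor
mirrors what memchr() may legitimately read: on a hit, only the bytes up to and
including the match; on a miss, the whole range. A small hypothetical
illustration:

#include <string.h>

static void MemchrReadRangeExample() {
  char buf[8] = {'a', 'b', 'c', '\0', 1, 2, 3, 4};
  (void)memchr(buf, '\0', sizeof(buf));  // hit at offset 3 -> 4 bytes checked
  (void)memchr(buf, 'z', sizeof(buf));   // no match -> all 8 bytes checked
}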
#if SANITIZER_INTERCEPT_FREXP
INTERCEPTOR(double, frexp, double x, int *exp) {
void *ctx;
@@ -449,7 +540,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
if (tm->tm_zone) {
// Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, tm->tm_zone,
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
REAL(strlen(tm->tm_zone)) + 1);
}
}
@@ -533,6 +624,20 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
}
return res;
}
+INTERCEPTOR(long, mktime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));
+ long res = REAL(mktime)(tm);
+ if (res != -1) unpoison_tm(ctx, tm);
+ return res;
+}
#define INIT_LOCALTIME_AND_FRIENDS \
COMMON_INTERCEPT_FUNCTION(localtime); \
COMMON_INTERCEPT_FUNCTION(localtime_r); \
@@ -541,7 +646,8 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
COMMON_INTERCEPT_FUNCTION(ctime); \
COMMON_INTERCEPT_FUNCTION(ctime_r); \
COMMON_INTERCEPT_FUNCTION(asctime); \
- COMMON_INTERCEPT_FUNCTION(asctime_r);
+ COMMON_INTERCEPT_FUNCTION(asctime_r); \
+ COMMON_INTERCEPT_FUNCTION(mktime);
#else
#define INIT_LOCALTIME_AND_FRIENDS
#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
@@ -567,9 +673,23 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
#define INIT_STRPTIME
#endif
-#if SANITIZER_INTERCEPT_SCANF
+#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF
+#include "sanitizer_common_interceptors_format.inc"
-#include "sanitizer_common_interceptors_scanf.inc"
+#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ va_list ap; \
+ va_start(ap, format); \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
+ int res = WRAP(vname)(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
+
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
{ \
@@ -605,35 +725,24 @@ INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
-#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \
- { \
- void *ctx; \
- va_list ap; \
- va_start(ap, format); \
- COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
- int res = vname(__VA_ARGS__, ap); \
- va_end(ap); \
- return res; \
- }
-
INTERCEPTOR(int, scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)
INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
#if SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#endif
#endif
@@ -662,6 +771,171 @@ SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#define INIT_ISOC99_SCANF
#endif
+#if SANITIZER_INTERCEPT_PRINTF
+
+#define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap);
+
+#define VPRINTF_INTERCEPTOR_RETURN() \
+ va_end(aq);
+
+#define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \
+ if (common_flags()->check_printf) \
+ printf_common(ctx, format, aq); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, size, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(strp, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)
+
+INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,
+ va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+
+INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
+VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,
+ const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,
+ va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format,
+ ap)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+INTERCEPTOR(int, printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)
+
+INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)
+
+INTERCEPTOR(int, sprintf, char *str, const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format) // NOLINT
+
+INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)
+
+INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
+
+INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,
+ ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)
+
+INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
+ format)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+#endif // SANITIZER_INTERCEPT_PRINTF
+
+#if SANITIZER_INTERCEPT_PRINTF
+#define INIT_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(printf); \
+ COMMON_INTERCEPT_FUNCTION(sprintf); \
+ COMMON_INTERCEPT_FUNCTION(snprintf); \
+ COMMON_INTERCEPT_FUNCTION(asprintf); \
+ COMMON_INTERCEPT_FUNCTION(fprintf); \
+ COMMON_INTERCEPT_FUNCTION(vprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION(vfprintf);
+#else
+#define INIT_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define INIT_ISOC99_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);
+#else
+#define INIT_ISOC99_PRINTF
+#endif
+
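For reference, FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format) turns the
printf interceptor into a thin varargs shim over the vprintf interceptor, which
is where printf_common() performs the actual format/argument checking. Roughly
(WRAP_printf/WRAP_vprintf stand in for the names the INTERCEPTOR/WRAP macros
generate; this is an expansion sketch, not compilable on its own):

int WRAP_printf(const char *format, ...) {
  void *ctx;
  va_list ap;
  va_start(ap, format);
  COMMON_INTERCEPTOR_ENTER(ctx, vprintf, format, ap);
  int res = WRAP_vprintf(format, ap);  // checks format/args, calls REAL(vprintf)
  va_end(ap);
  return res;
}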
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
@@ -675,7 +949,14 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
const ioctl_desc *desc = ioctl_lookup(request);
- if (!desc) Printf("WARNING: unknown ioctl %x\n", request);
+ ioctl_desc decoded_desc;
+ if (!desc) {
+ VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ if (!ioctl_decode(request, &decoded_desc))
+ Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ else
+ desc = &decoded_desc;
+ }
if (desc) ioctl_common_pre(ctx, desc, d, request, arg);
int res = REAL(ioctl)(d, request, arg);
@@ -690,35 +971,85 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
#define INIT_IOCTL
#endif
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || \
+ SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT
+static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
+ if (pwd) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
+ if (pwd->pw_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_name,
+ REAL(strlen)(pwd->pw_name) + 1);
+ if (pwd->pw_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_passwd,
+ REAL(strlen)(pwd->pw_passwd) + 1);
+#if !SANITIZER_ANDROID
+ if (pwd->pw_gecos)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_gecos,
+ REAL(strlen)(pwd->pw_gecos) + 1);
+#endif
+#if SANITIZER_MAC
+ if (pwd->pw_class)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_class,
+ REAL(strlen)(pwd->pw_class) + 1);
+#endif
+ if (pwd->pw_dir)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_dir,
+ REAL(strlen)(pwd->pw_dir) + 1);
+ if (pwd->pw_shell)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_shell,
+ REAL(strlen)(pwd->pw_shell) + 1);
+ }
+}
+
+static void unpoison_group(void *ctx, __sanitizer_group *grp) {
+ if (grp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
+ if (grp->gr_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_name,
+ REAL(strlen)(grp->gr_name) + 1);
+ if (grp->gr_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_passwd,
+ REAL(strlen)(grp->gr_passwd) + 1);
+ char **p = grp->gr_mem;
+ for (; *p; ++p) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*p, REAL(strlen)(*p) + 1);
+ }
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_mem,
+ (p - grp->gr_mem + 1) * sizeof(*p));
+ }
+}
+#endif // SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS ||
+ // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT
+
#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-INTERCEPTOR(void *, getpwnam, const char *name) {
+INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getpwnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwnam)(name);
+ if (res != 0) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getpwuid, u32 uid) {
+INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
- void *res = REAL(getpwuid)(uid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwuid)(uid);
+ if (res != 0) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrnam, const char *name) {
+INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getgrnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrnam)(name);
+ if (res != 0) unpoison_group(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrgid, u32 gid) {
+INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
- void *res = REAL(getgrgid)(gid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrgid)(gid);
+ if (res != 0) unpoison_group(ctx, res);
return res;
}
#define INIT_GETPWNAM_AND_FRIENDS \
@@ -731,50 +1062,54 @@ INTERCEPTOR(void *, getgrgid, u32 gid) {
#endif
#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
-INTERCEPTOR(int, getpwnam_r, const char *name, void *pwd, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
+ char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getpwuid_r, u32 uid, void *pwd, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrnam_r, const char *name, void *grp, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
+ char *buf, SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
+ SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
#define INIT_GETPWNAM_R_AND_FRIENDS \
@@ -786,6 +1121,141 @@ INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
#define INIT_GETPWNAM_R_AND_FRIENDS
#endif
+#if SANITIZER_INTERCEPT_GETPWENT
+INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);
+ __sanitizer_passwd *res = REAL(getpwent)(dummy);
+ if (res != 0) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);
+ __sanitizer_group *res = REAL(getgrent)(dummy);
+  if (res != 0) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWENT \
+ COMMON_INTERCEPT_FUNCTION(getpwent); \
+ COMMON_INTERCEPT_FUNCTION(getgrent);
+#else
+#define INIT_GETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT
+INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);
+ __sanitizer_passwd *res = REAL(fgetpwent)(fp);
+ if (res != 0) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);
+ __sanitizer_group *res = REAL(fgetgrent)(fp);
+ if (res != 0) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_FGETPWENT \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent);
+#else
+#define INIT_FGETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT_R
+INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
+ __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_GETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(getpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent_r);
+#else
+#define INIT_GETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SETPWENT
+// The only thing these interceptors do is disable any nested interceptors.
+// These functions may open NSS modules and call uninstrumented functions from
+// them, and we don't want interceptors such as strlen() to trigger there.
+INTERCEPTOR(void, setpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);
+ REAL(setpwent)(dummy);
+}
+INTERCEPTOR(void, endpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);
+ REAL(endpwent)(dummy);
+}
+INTERCEPTOR(void, setgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);
+ REAL(setgrent)(dummy);
+}
+INTERCEPTOR(void, endgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);
+ REAL(endgrent)(dummy);
+}
+#define INIT_SETPWENT \
+ COMMON_INTERCEPT_FUNCTION(setpwent); \
+ COMMON_INTERCEPT_FUNCTION(endpwent); \
+ COMMON_INTERCEPT_FUNCTION(setgrent); \
+ COMMON_INTERCEPT_FUNCTION(endgrent);
+#else
+#define INIT_SETPWENT
+#endif
+
#if SANITIZER_INTERCEPT_CLOCK_GETTIME
INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
void *ctx;
@@ -861,34 +1331,33 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
}
static THREADLOCAL __sanitizer_glob_t *pglob_copy;
-static THREADLOCAL void *glob_ctx;
static void wrapped_gl_closedir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- pglob_copy->gl_closedir(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ IndirectExternCall(pglob_copy->gl_closedir)(dir);
}
static void *wrapped_gl_readdir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- return pglob_copy->gl_readdir(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ return IndirectExternCall(pglob_copy->gl_readdir)(dir);
}
static void *wrapped_gl_opendir(const char *s) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_opendir(s);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_opendir)(s);
}
static int wrapped_gl_lstat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_lstat(s, st);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_lstat)(s, st);
}
static int wrapped_gl_stat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_stat(s, st);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_stat)(s, st);
}
INTERCEPTOR(int, glob, const char *pattern, int flags,
@@ -907,7 +1376,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -918,7 +1386,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -939,7 +1406,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob64)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -950,7 +1416,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -1000,6 +1465,19 @@ INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
}
return res;
}
+#if SANITIZER_ANDROID
+INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ int res = REAL(__wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);
+#else
INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
@@ -1010,14 +1488,16 @@ INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
}
return res;
}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);
+#endif // SANITIZER_ANDROID
#define INIT_WAIT \
COMMON_INTERCEPT_FUNCTION(wait); \
COMMON_INTERCEPT_FUNCTION(waitid); \
COMMON_INTERCEPT_FUNCTION(waitpid); \
- COMMON_INTERCEPT_FUNCTION(wait3); \
- COMMON_INTERCEPT_FUNCTION(wait4);
+ COMMON_INTERCEPT_FUNCTION(wait3);
#else
#define INIT_WAIT
+#define INIT_WAIT4
#endif
#if SANITIZER_INTERCEPT_INET
@@ -1227,14 +1707,12 @@ INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
h_errnop);
int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
@@ -1247,14 +1725,12 @@ INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
@@ -1265,14 +1741,12 @@ INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
h_errnop);
int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
@@ -1284,14 +1758,12 @@ INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
result, h_errnop);
int res =
REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
#define INIT_GETHOSTBYNAME_R \
@@ -1834,7 +2306,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
char *res = REAL(strerror)(errnum);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -1890,24 +2362,23 @@ typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
const struct __sanitizer_dirent **);
-static THREADLOCAL void *scandir_ctx;
static THREADLOCAL scandir_filter_f scandir_filter;
static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, dir, dir->d_reclen);
- return scandir_filter(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return IndirectExternCall(scandir_filter)(dir);
}
static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *b, (*b)->d_reclen);
- return scandir_compar(a, b);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return IndirectExternCall(scandir_compar)(a, b);
}
INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
@@ -1915,13 +2386,10 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir_ctx);
- scandir_ctx = ctx;
scandir_filter = filter;
scandir_compar = compar;
int res = REAL(scandir)(dirp, namelist, filter ? wrapped_scandir_filter : 0,
compar ? wrapped_scandir_compar : 0);
- scandir_ctx = 0;
scandir_filter = 0;
scandir_compar = 0;
if (namelist && res > 0) {
@@ -1943,24 +2411,23 @@ typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
const struct __sanitizer_dirent64 **);
-static THREADLOCAL void *scandir64_ctx;
static THREADLOCAL scandir64_filter_f scandir64_filter;
static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, dir, dir->d_reclen);
- return scandir64_filter(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return IndirectExternCall(scandir64_filter)(dir);
}
static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *b, (*b)->d_reclen);
- return scandir64_compar(a, b);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return IndirectExternCall(scandir64_compar)(a, b);
}
INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
@@ -1968,14 +2435,11 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir64_ctx);
- scandir64_ctx = ctx;
scandir64_filter = filter;
scandir64_compar = compar;
int res =
REAL(scandir64)(dirp, namelist, filter ? wrapped_scandir64_filter : 0,
compar ? wrapped_scandir64_compar : 0);
- scandir64_ctx = 0;
scandir64_filter = 0;
scandir64_compar = 0;
if (namelist && res > 0) {
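
The two scandir hunks above drop the per-call scandir_ctx slot: the interceptor now stashes the user's filter/compar pointers in THREADLOCAL variables, hands the wrapped_* trampolines to REAL(scandir), and the trampolines unpoison their arguments (COMMON_INTERCEPTOR_UNPOISON_PARAM / COMMON_INTERCEPTOR_INITIALIZE_RANGE) before forwarding through IndirectExternCall. A minimal sketch of that thread-local trampoline pattern, with illustrative names rather than the sanitizer's macros:

    #include <cstdio>

    using filter_fn = int (*)(const char *);
    static thread_local filter_fn g_filter;   // one slot per thread; no ctx value needed

    static int wrapped_filter(const char *name) {
      // The real wrapper unpoisons/initializes the callback argument here
      // before the user callback ever reads it, then forwards the call.
      return g_filter(name);
    }

    static int run_with_filter(const char *name, filter_fn f) {
      g_filter = f;                        // install, as the interceptor does
      int res = wrapped_filter(name);      // the C library would invoke this indirectly
      g_filter = nullptr;                  // clear again once REAL(scandir) returns
      return res;
    }

    int main() {
      std::printf("%d\n", run_with_filter("entry",
                  [](const char *n) { return n[0] == 'e' ? 1 : 0; }));
    }
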
@@ -2243,53 +2707,6 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
#define INIT_PTHREAD_MUTEX_UNLOCK
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_COND
-INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_wait, c, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- int res = REAL(pthread_cond_wait)(c, m);
- COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_init, c, a);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_init)(c, a);
-}
-
-INTERCEPTOR(int, pthread_cond_signal, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_signal, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_signal)(c);
-}
-
-INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_broadcast, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_broadcast)(c);
-}
-
-#define INIT_PTHREAD_COND_WAIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_INIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_SIGNAL \
- INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_BROADCAST \
- INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2")
-#else
-#define INIT_PTHREAD_COND_WAIT
-#define INIT_PTHREAD_COND_INIT
-#define INIT_PTHREAD_COND_SIGNAL
-#define INIT_PTHREAD_COND_BROADCAST
-#endif
-
#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
@@ -2446,7 +2863,7 @@ INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
char *res = REAL(ether_ntoa)(addr);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
@@ -2454,7 +2871,7 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
__sanitizer_ether_addr *res = REAL(ether_aton)(buf);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, sizeof(*res));
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
}
INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
@@ -2592,9 +3009,13 @@ INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
// We may need to call the real pthread_attr_getstack from the run-time
// in sanitizer_common, but we don't want to include the interception headers
// there. So, just define this function here.
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
+namespace __sanitizer {
+extern "C" {
+int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
return REAL(pthread_attr_getstack)(attr, addr, size);
}
+} // extern "C"
+} // namespace __sanitizer
#define INIT_PTHREAD_ATTR_GET \
COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \
@@ -2644,7 +3065,7 @@ INTERCEPTOR(char *, tmpnam, char *s) {
if (s)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
}
return res;
}
@@ -2673,7 +3094,7 @@ INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
char *res = REAL(tempnam)(dir, pfx);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -2836,6 +3257,18 @@ INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
#define INIT_DRAND48_R
#endif
+#if SANITIZER_INTERCEPT_RAND_R
+INTERCEPTOR(int, rand_r, unsigned *seedp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));
+ return REAL(rand_r)(seedp);
+}
+#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);
+#else
+#define INIT_RAND_R
+#endif
+
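The new rand_r interceptor reports a read of the whole seed before calling the real function, so a tool that tracks initialization (MSan, for example) can flag an uninitialized seed at the call site. A small hedged usage sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      unsigned seed = 42;              // fully initialized: the READ_RANGE check is satisfied
      printf("%d\n", rand_r(&seed));
      // Passing a pointer to an uninitialized seed instead would let an
      // initialization-tracking tool report the error at this call.
      return 0;
    }
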
#if SANITIZER_INTERCEPT_GETLINE
INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
void *ctx;
@@ -2906,117 +3339,840 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#define INIT_TIMES
#endif
-#define SANITIZER_COMMON_INTERCEPTORS_INIT \
- INIT_TEXTDOMAIN; \
- INIT_STRCMP; \
- INIT_STRNCMP; \
- INIT_STRCASECMP; \
- INIT_STRNCASECMP; \
- INIT_READ; \
- INIT_PREAD; \
- INIT_PREAD64; \
- INIT_READV; \
- INIT_PREADV; \
- INIT_PREADV64; \
- INIT_WRITE; \
- INIT_PWRITE; \
- INIT_PWRITE64; \
- INIT_WRITEV; \
- INIT_PWRITEV; \
- INIT_PWRITEV64; \
- INIT_PRCTL; \
- INIT_LOCALTIME_AND_FRIENDS; \
- INIT_STRPTIME; \
- INIT_SCANF; \
- INIT_ISOC99_SCANF; \
- INIT_FREXP; \
- INIT_FREXPF_FREXPL; \
- INIT_GETPWNAM_AND_FRIENDS; \
- INIT_GETPWNAM_R_AND_FRIENDS; \
- INIT_CLOCK_GETTIME; \
- INIT_GETITIMER; \
- INIT_TIME; \
- INIT_GLOB; \
- INIT_WAIT; \
- INIT_INET; \
- INIT_PTHREAD_GETSCHEDPARAM; \
- INIT_GETADDRINFO; \
- INIT_GETNAMEINFO; \
- INIT_GETSOCKNAME; \
- INIT_GETHOSTBYNAME; \
- INIT_GETHOSTBYNAME_R; \
- INIT_GETSOCKOPT; \
- INIT_ACCEPT; \
- INIT_ACCEPT4; \
- INIT_MODF; \
- INIT_RECVMSG; \
- INIT_GETPEERNAME; \
- INIT_IOCTL; \
- INIT_INET_ATON; \
- INIT_SYSINFO; \
- INIT_READDIR; \
- INIT_READDIR64; \
- INIT_PTRACE; \
- INIT_SETLOCALE; \
- INIT_GETCWD; \
- INIT_GET_CURRENT_DIR_NAME; \
- INIT_STRTOIMAX; \
- INIT_MBSTOWCS; \
- INIT_MBSNRTOWCS; \
- INIT_WCSTOMBS; \
- INIT_WCSNRTOMBS; \
- INIT_TCGETATTR; \
- INIT_REALPATH; \
- INIT_CANONICALIZE_FILE_NAME; \
- INIT_CONFSTR; \
- INIT_SCHED_GETAFFINITY; \
- INIT_STRERROR; \
- INIT_STRERROR_R; \
- INIT_XPG_STRERROR_R; \
- INIT_SCANDIR; \
- INIT_SCANDIR64; \
- INIT_GETGROUPS; \
- INIT_POLL; \
- INIT_PPOLL; \
- INIT_WORDEXP; \
- INIT_SIGWAIT; \
- INIT_SIGWAITINFO; \
- INIT_SIGTIMEDWAIT; \
- INIT_SIGSETOPS; \
- INIT_SIGPENDING; \
- INIT_SIGPROCMASK; \
- INIT_BACKTRACE; \
- INIT__EXIT; \
- INIT_PTHREAD_MUTEX_LOCK; \
- INIT_PTHREAD_MUTEX_UNLOCK; \
- INIT_PTHREAD_COND_WAIT; \
- INIT_PTHREAD_COND_INIT; \
- INIT_PTHREAD_COND_SIGNAL; \
- INIT_PTHREAD_COND_BROADCAST; \
- INIT_GETMNTENT; \
- INIT_GETMNTENT_R; \
- INIT_STATFS; \
- INIT_STATFS64; \
- INIT_STATVFS; \
- INIT_STATVFS64; \
- INIT_INITGROUPS; \
- INIT_ETHER; \
- INIT_ETHER_R; \
- INIT_SHMCTL; \
- INIT_RANDOM_R; \
- INIT_PTHREAD_ATTR_GET; \
- INIT_PTHREAD_ATTR_GETINHERITSCHED; \
- INIT_PTHREAD_ATTR_GETAFFINITY_NP; \
- INIT_TMPNAM; \
- INIT_TMPNAM_R; \
- INIT_TEMPNAM; \
- INIT_PTHREAD_SETNAME_NP; \
- INIT_SINCOS; \
- INIT_REMQUO; \
- INIT_LGAMMA; \
- INIT_LGAMMA_R; \
- INIT_DRAND48_R; \
- INIT_GETLINE; \
- INIT_ICONV; \
- INIT_TIMES; \
-/**/
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ DTLS_on_tls_get_addr(arg, res);
+ return res;
+}
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_INTERCEPT_LISTXATTR
+INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(listxattr)(path, list, size);
+ // Here and below, size == 0 is a special case where nothing is written to the
+ // buffer, and res contains the desired buffer size.
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(llistxattr)(path, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ SSIZE_T res = REAL(flistxattr)(fd, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+#define INIT_LISTXATTR \
+ COMMON_INTERCEPT_FUNCTION(listxattr); \
+ COMMON_INTERCEPT_FUNCTION(llistxattr); \
+ COMMON_INTERCEPT_FUNCTION(flistxattr);
+#else
+#define INIT_LISTXATTR
+#endif
+
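The size == 0 convention noted in the listxattr interceptors matches the usual two-step calling pattern: query the required length first, then fetch the list; only the second call causes the interceptor to mark the output buffer as written. A hedged usage sketch (the path is illustrative):

    #include <sys/xattr.h>
    #include <cstdio>
    #include <vector>

    int main() {
      const char *path = "/tmp";                      // illustrative path
      ssize_t needed = listxattr(path, nullptr, 0);   // size == 0: query only, nothing written
      if (needed <= 0) return 0;
      std::vector<char> buf(needed);
      ssize_t got = listxattr(path, buf.data(), buf.size());
      if (got > 0)                                    // interceptor marks buf[0..got) as written
        std::printf("attribute list occupies %zd bytes\n", got);
      return 0;
    }
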
+#if SANITIZER_INTERCEPT_GETXATTR
+INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ SSIZE_T res = REAL(getxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+#define INIT_GETXATTR \
+ COMMON_INTERCEPT_FUNCTION(getxattr); \
+ COMMON_INTERCEPT_FUNCTION(lgetxattr); \
+ COMMON_INTERCEPT_FUNCTION(fgetxattr);
+#else
+#define INIT_GETXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETRESID
+INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ int res = REAL(getresuid)(ruid, euid, suid);
+ if (res >= 0) {
+ if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
+ if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);
+ if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ int res = REAL(getresgid)(rgid, egid, sgid);
+ if (res >= 0) {
+ if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
+ if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);
+ if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);
+ }
+ return res;
+}
+#define INIT_GETRESID \
+ COMMON_INTERCEPT_FUNCTION(getresuid); \
+ COMMON_INTERCEPT_FUNCTION(getresgid);
+#else
+#define INIT_GETRESID
+#endif
+
+#if SANITIZER_INTERCEPT_GETIFADDRS
+// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to
+// intercept freeifaddrs(). If that ceases to be the case, we might need to
+// intercept it to poison the memory again.
+INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ int res = REAL(getifaddrs)(ifap);
+ if (res == 0 && ifap) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
+ __sanitizer_ifaddrs *p = *ifap;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
+ if (p->ifa_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
+ REAL(strlen)(p->ifa_name) + 1);
+ if (p->ifa_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
+ if (p->ifa_netmask)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);
+ // On Linux this is a union, but the other member also points to a
+ // struct sockaddr, so the following is sufficient.
+ if (p->ifa_dstaddr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);
+ // FIXME(smatveev): Unpoison p->ifa_data as well.
+ p = p->ifa_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETIFADDRS \
+ COMMON_INTERCEPT_FUNCTION(getifaddrs);
+#else
+#define INIT_GETIFADDRS
+#endif
+
+#if SANITIZER_INTERCEPT_IF_INDEXTONAME
+INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ char *res = REAL(if_indextoname)(ifindex, ifname);
+ if (res && ifname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return res;
+}
+INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
+ if (ifname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return REAL(if_nametoindex)(ifname);
+}
+#define INIT_IF_INDEXTONAME \
+ COMMON_INTERCEPT_FUNCTION(if_indextoname); \
+ COMMON_INTERCEPT_FUNCTION(if_nametoindex);
+#else
+#define INIT_IF_INDEXTONAME
+#endif
+
+#if SANITIZER_INTERCEPT_CAPGET
+INTERCEPTOR(int, capget, void *hdrp, void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ int res = REAL(capget)(hdrp, datap);
+ if (res == 0 && datap)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ // We can also return -1 and write to hdrp->version if the version passed in
+ // hdrp->version is unsupported. But that's not a trivial condition to check,
+ // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
+ return res;
+}
+INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ if (datap)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ return REAL(capset)(hdrp, datap);
+}
+#define INIT_CAPGET \
+ COMMON_INTERCEPT_FUNCTION(capget); \
+ COMMON_INTERCEPT_FUNCTION(capset);
+#else
+#define INIT_CAPGET
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr);
+DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr);
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
+
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
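The "Note the argument order" comment is the whole point of the __aeabi_memset family: the ARM EABI helpers take (dest, size, value) while C memset takes (dest, value, size), so the interceptors swap the last two arguments when forwarding to WRAP(memset). A minimal illustrative shim doing the same swap:

    #include <cstring>
    #include <cstddef>

    // ARM EABI order: (dest, size, value); libc memset order: (dest, value, size).
    static void *aeabi_memset_shim(void *block, std::size_t size, int c) {
      return std::memset(block, c, size);   // note the swapped last two arguments
    }

    int main() {
      char buf[8];
      aeabi_memset_shim(buf, sizeof(buf), 0);
      return buf[0];
    }
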
+#if SANITIZER_INTERCEPT___BZERO
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
+
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_FTIME
+INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ int res = REAL(ftime)(tp);
+ if (tp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
+ return res;
+}
+#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);
+#else
+#define INIT_FTIME
+#endif // SANITIZER_INTERCEPT_FTIME
+
+#if SANITIZER_INTERCEPT_XDR
+INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
+ unsigned size, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ REAL(xdrmem_create)(xdrs, addr, size, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+ if (op == __sanitizer_XDR_ENCODE) {
+ // It's not obvious how much data individual xdr_ routines write.
+ // Simply unpoison the entire target buffer in advance.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);
+ }
+}
+
+INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ REAL(xdrstdio_create)(xdrs, file, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+}
+
+#define XDR_INTERCEPTOR(F, T) \
+ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \
+ int res = REAL(F)(xdrs, p); \
+ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \
+ return res; \
+ }
+
+XDR_INTERCEPTOR(xdr_short, short)
+XDR_INTERCEPTOR(xdr_u_short, unsigned short)
+XDR_INTERCEPTOR(xdr_int, int)
+XDR_INTERCEPTOR(xdr_u_int, unsigned)
+XDR_INTERCEPTOR(xdr_long, long)
+XDR_INTERCEPTOR(xdr_u_long, unsigned long)
+XDR_INTERCEPTOR(xdr_hyper, long long)
+XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)
+XDR_INTERCEPTOR(xdr_longlong_t, long long)
+XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_int8_t, u8)
+XDR_INTERCEPTOR(xdr_uint8_t, u8)
+XDR_INTERCEPTOR(xdr_int16_t, u16)
+XDR_INTERCEPTOR(xdr_uint16_t, u16)
+XDR_INTERCEPTOR(xdr_int32_t, u32)
+XDR_INTERCEPTOR(xdr_uint32_t, u32)
+XDR_INTERCEPTOR(xdr_int64_t, u64)
+XDR_INTERCEPTOR(xdr_uint64_t, u64)
+XDR_INTERCEPTOR(xdr_quad_t, long long)
+XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_bool, bool)
+XDR_INTERCEPTOR(xdr_enum, int)
+XDR_INTERCEPTOR(xdr_char, char)
+XDR_INTERCEPTOR(xdr_u_char, unsigned char)
+XDR_INTERCEPTOR(xdr_float, float)
+XDR_INTERCEPTOR(xdr_double, double)
+
+// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,
+// wrapstring, sizeof
+
+INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
+ }
+ int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));
+ if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ int res = REAL(xdr_string)(xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (res && *p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ return res;
+}
+
+#define INIT_XDR \
+ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \
+ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bool); \
+ COMMON_INTERCEPT_FUNCTION(xdr_enum); \
+ COMMON_INTERCEPT_FUNCTION(xdr_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_float); \
+ COMMON_INTERCEPT_FUNCTION(xdr_double); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \
+ COMMON_INTERCEPT_FUNCTION(xdr_string);
+#else
+#define INIT_XDR
+#endif // SANITIZER_INTERCEPT_XDR
+
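XDR_INTERCEPTOR makes the direction of the check depend on xdrs->x_op: when encoding, the argument is a source and is checked as a read; when decoding, it is a destination and is marked written only if the call succeeded. A hedged usage sketch against the classic SunRPC XDR API (<rpc/xdr.h>; on modern systems these headers may come from libtirpc rather than libc):

    #include <rpc/xdr.h>
    #include <cstdio>

    int main() {
      char buf[64];
      XDR enc, dec;
      int value = 42, decoded = 0;

      xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
      xdr_int(&enc, &value);      // ENCODE: the wrapper checks that `value` is initialized
      xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
      xdr_int(&dec, &decoded);    // DECODE: the wrapper marks `decoded` as written on success
      std::printf("%d\n", decoded);
      return 0;
    }
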
+#if SANITIZER_INTERCEPT_TSEARCH
+INTERCEPTOR(void *, tsearch, void *key, void **rootp,
+ int (*compar)(const void *, const void *)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ void *res = REAL(tsearch)(key, rootp, compar);
+ if (res && *(void **)res == key)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
+ return res;
+}
+#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);
+#else
+#define INIT_TSEARCH
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \
+ SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+void unpoison_file(__sanitizer_FILE *fp) {
+#if SANITIZER_HAS_STRUCT_FILE
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));
+ if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
+ fp->_IO_read_end - fp->_IO_read_base);
+#endif // SANITIZER_HAS_STRUCT_FILE
+}
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS
+// These functions are called when a .c source is built with -O2.
+INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);
+ int res = REAL(__uflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);
+ int res = REAL(__underflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);
+ int res = REAL(__overflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);
+ int res = REAL(__wuflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);
+ int res = REAL(__wunderflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);
+ int res = REAL(__woverflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+#define INIT_LIBIO_INTERNALS \
+ COMMON_INTERCEPT_FUNCTION(__uflow); \
+ COMMON_INTERCEPT_FUNCTION(__underflow); \
+ COMMON_INTERCEPT_FUNCTION(__overflow); \
+ COMMON_INTERCEPT_FUNCTION(__wuflow); \
+ COMMON_INTERCEPT_FUNCTION(__wunderflow); \
+ COMMON_INTERCEPT_FUNCTION(__woverflow);
+#else
+#define INIT_LIBIO_INTERNALS
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN
+INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN \
+ COMMON_INTERCEPT_FUNCTION(fopen); \
+ COMMON_INTERCEPT_FUNCTION(fdopen); \
+ COMMON_INTERCEPT_FUNCTION(freopen);
+#else
+#define INIT_FOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN64
+INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen64)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN64 \
+ COMMON_INTERCEPT_FUNCTION(fopen64); \
+ COMMON_INTERCEPT_FUNCTION(freopen64);
+#else
+#define INIT_FOPEN64
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,
+ SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {(char **)ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
+ const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_OPEN_MEMSTREAM \
+ COMMON_INTERCEPT_FUNCTION(open_memstream); \
+ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \
+ COMMON_INTERCEPT_FUNCTION(fmemopen);
+#else
+#define INIT_OPEN_MEMSTREAM
+#endif
+
+#if SANITIZER_INTERCEPT_OBSTACK
+static void initialize_obstack(__sanitizer_obstack *obstack) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,
+ sizeof(*obstack->chunk));
+}
+
+INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr arg, uptr sz),
+ void (*free_fn)(uptr arg, void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);
+ REAL(_obstack_newchunk)(obstack, length);
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(
+ obstack->chunk, obstack->next_free - (char *)obstack->chunk);
+}
+#define INIT_OBSTACK \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);
+#else
+#define INIT_OBSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_FFLUSH
+INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ int res = REAL(fflush)(fp);
+ // FIXME: handle fp == NULL
+ if (fp) {
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ }
+ return res;
+}
+#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);
+#else
+#define INIT_FFLUSH
+#endif
+
+#if SANITIZER_INTERCEPT_FCLOSE
+INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
+ if (fp) {
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ }
+ return REAL(fclose)(fp);
+}
+#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);
+#else
+#define INIT_FCLOSE
+#endif
+
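open_memstream ties these pieces together: its interceptor records the user's (ptr, sizeloc) pair in the metadata map, and the fflush/fclose interceptors above look that pair up to unpoison the backing buffer at exactly the points where POSIX says it becomes valid. A hedged usage sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      char *buf = nullptr;
      size_t size = 0;
      FILE *f = open_memstream(&buf, &size);
      if (!f) return 1;
      fprintf(f, "hello %d", 42);
      fflush(f);                   // interceptor unpoisons the current buf/size contents
      printf("%zu bytes: %s\n", size, buf);
      fclose(f);                   // unpoisoned again; the metadata entry is removed
      free(buf);
      return 0;
    }
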
+static void InitializeCommonInterceptors() {
+ static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
+ interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
+
+ INIT_TEXTDOMAIN;
+ INIT_STRCMP;
+ INIT_STRNCMP;
+ INIT_STRCASECMP;
+ INIT_STRNCASECMP;
+ INIT_MEMCHR;
+ INIT_MEMRCHR;
+ INIT_READ;
+ INIT_PREAD;
+ INIT_PREAD64;
+ INIT_READV;
+ INIT_PREADV;
+ INIT_PREADV64;
+ INIT_WRITE;
+ INIT_PWRITE;
+ INIT_PWRITE64;
+ INIT_WRITEV;
+ INIT_PWRITEV;
+ INIT_PWRITEV64;
+ INIT_PRCTL;
+ INIT_LOCALTIME_AND_FRIENDS;
+ INIT_STRPTIME;
+ INIT_SCANF;
+ INIT_ISOC99_SCANF;
+ INIT_PRINTF;
+ INIT_ISOC99_PRINTF;
+ INIT_FREXP;
+ INIT_FREXPF_FREXPL;
+ INIT_GETPWNAM_AND_FRIENDS;
+ INIT_GETPWNAM_R_AND_FRIENDS;
+ INIT_GETPWENT;
+ INIT_FGETPWENT;
+ INIT_GETPWENT_R;
+ INIT_SETPWENT;
+ INIT_CLOCK_GETTIME;
+ INIT_GETITIMER;
+ INIT_TIME;
+ INIT_GLOB;
+ INIT_WAIT;
+ INIT_WAIT4;
+ INIT_INET;
+ INIT_PTHREAD_GETSCHEDPARAM;
+ INIT_GETADDRINFO;
+ INIT_GETNAMEINFO;
+ INIT_GETSOCKNAME;
+ INIT_GETHOSTBYNAME;
+ INIT_GETHOSTBYNAME_R;
+ INIT_GETSOCKOPT;
+ INIT_ACCEPT;
+ INIT_ACCEPT4;
+ INIT_MODF;
+ INIT_RECVMSG;
+ INIT_GETPEERNAME;
+ INIT_IOCTL;
+ INIT_INET_ATON;
+ INIT_SYSINFO;
+ INIT_READDIR;
+ INIT_READDIR64;
+ INIT_PTRACE;
+ INIT_SETLOCALE;
+ INIT_GETCWD;
+ INIT_GET_CURRENT_DIR_NAME;
+ INIT_STRTOIMAX;
+ INIT_MBSTOWCS;
+ INIT_MBSNRTOWCS;
+ INIT_WCSTOMBS;
+ INIT_WCSNRTOMBS;
+ INIT_TCGETATTR;
+ INIT_REALPATH;
+ INIT_CANONICALIZE_FILE_NAME;
+ INIT_CONFSTR;
+ INIT_SCHED_GETAFFINITY;
+ INIT_STRERROR;
+ INIT_STRERROR_R;
+ INIT_XPG_STRERROR_R;
+ INIT_SCANDIR;
+ INIT_SCANDIR64;
+ INIT_GETGROUPS;
+ INIT_POLL;
+ INIT_PPOLL;
+ INIT_WORDEXP;
+ INIT_SIGWAIT;
+ INIT_SIGWAITINFO;
+ INIT_SIGTIMEDWAIT;
+ INIT_SIGSETOPS;
+ INIT_SIGPENDING;
+ INIT_SIGPROCMASK;
+ INIT_BACKTRACE;
+ INIT__EXIT;
+ INIT_PTHREAD_MUTEX_LOCK;
+ INIT_PTHREAD_MUTEX_UNLOCK;
+ INIT_GETMNTENT;
+ INIT_GETMNTENT_R;
+ INIT_STATFS;
+ INIT_STATFS64;
+ INIT_STATVFS;
+ INIT_STATVFS64;
+ INIT_INITGROUPS;
+ INIT_ETHER;
+ INIT_ETHER_R;
+ INIT_SHMCTL;
+ INIT_RANDOM_R;
+ INIT_PTHREAD_ATTR_GET;
+ INIT_PTHREAD_ATTR_GETINHERITSCHED;
+ INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_TMPNAM;
+ INIT_TMPNAM_R;
+ INIT_TEMPNAM;
+ INIT_PTHREAD_SETNAME_NP;
+ INIT_SINCOS;
+ INIT_REMQUO;
+ INIT_LGAMMA;
+ INIT_LGAMMA_R;
+ INIT_DRAND48_R;
+ INIT_RAND_R;
+ INIT_GETLINE;
+ INIT_ICONV;
+ INIT_TIMES;
+ INIT_TLS_GET_ADDR;
+ INIT_LISTXATTR;
+ INIT_GETXATTR;
+ INIT_GETRESID;
+ INIT_GETIFADDRS;
+ INIT_IF_INDEXTONAME;
+ INIT_CAPGET;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_FTIME;
+ INIT_XDR;
+ INIT_TSEARCH;
+ INIT_LIBIO_INTERNALS;
+ INIT_FOPEN;
+ INIT_FOPEN64;
+ INIT_OPEN_MEMSTREAM;
+ INIT_OBSTACK;
+ INIT_FFLUSH;
+ INIT_FCLOSE;
+}
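
InitializeCommonInterceptors constructs the metadata map with placement new into a static u64 buffer rather than allocating it, apparently so that interceptor start-up does not depend on the (possibly intercepted, possibly not yet initialized) heap allocator. The same pattern in isolation, with illustrative names:

    #include <new>
    #include <stdint.h>

    struct Registry { int entries; Registry() : entries(0) {} };

    // Raw static storage sized and aligned via u64, constructed in place and never freed.
    static uint64_t registry_mem[sizeof(Registry) / sizeof(uint64_t) + 1];
    static Registry *registry;

    void InitRegistry() {
      registry = new (static_cast<void *>(&registry_mem)) Registry();
    }

    int main() {
      InitRegistry();
      return registry->entries;
    }
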
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
new file mode 100644
index 000000000000..dfc4ac6b556a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -0,0 +1,554 @@
+//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf/printf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static const char *maybe_parse_param_index(const char *p, int *out) {
+ // n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ CHECK(q);
+ if (*q == '$') {
+ *out = number;
+ p = q + 1;
+ }
+ }
+
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ return p;
+}
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
+ if (char_is_one_of(*p, "jztLq")) {
+ ll[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ ll[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ ll[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ ll[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ ll[1] = 'l';
+ ++p;
+ }
+ }
+ return p;
+}
+
+// Returns true if the character is an integer conversion specifier.
+static bool format_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating point conversion specifier.
+static bool format_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int format_get_char_size(char convSpecifier,
+ const char lengthModifier[2]) {
+ if (char_is_one_of(convSpecifier, "CS")) {
+ return sizeof(wchar_t);
+ }
+
+ if (char_is_one_of(convSpecifier, "cs[")) {
+ if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
+ return sizeof(wchar_t);
+ else if (lengthModifier[0] == '\0')
+ return sizeof(char);
+ }
+
+ return 0;
+}
+
+enum FormatStoreSize {
+ // Store size not known in advance; can be calculated as wcslen() of the
+ // destination buffer.
+ FSS_WCSLEN = -2,
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ FSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ FSS_INVALID = 0
+};
+
+// Returns the memory size of a format directive (if >0), or a value of
+// FormatStoreSize.
+static int format_get_value_size(char convSpecifier,
+ const char lengthModifier[2],
+ bool promote_float) {
+ if (format_is_integer_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'h':
+ return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'q':
+ return sizeof(long long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (format_is_float_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ // Printf promotes floats to doubles but scanf does not
+ return promote_float ? sizeof(double) : sizeof(float);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (convSpecifier == 'p') {
+ if (lengthModifier[0] != 0)
+ return FSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return FSS_INVALID;
+}
+
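A few hedged worked examples of format_get_value_size on a typical LP64 Linux target, since the same table now serves both scanf (promote_float == false) and printf (promote_float == true):

    #include <cstdio>

    int main() {
      // "%hhd" : integer conversion, length "hh"  -> sizeof(char)
      // "%ld"  : integer conversion, length "l"   -> sizeof(long)
      // "%f"   : float conversion, no modifier    -> sizeof(float) for scanf,
      //                                              sizeof(double) for printf (promoted)
      // "%p"   : pointer, no length modifier      -> sizeof(void *)
      std::printf("%zu %zu %zu %zu %zu\n", sizeof(char), sizeof(long),
                  sizeof(float), sizeof(double), sizeof(void *));
      return 0;
    }
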
+struct ScanfDirective {
+ int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ const char *begin;
+ const char *end;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+// Parse scanf format string. If a valid directive is encountered, it is
+// returned in dir. This function returns the pointer to the first
+// unprocessed character, or 0 in case of error.
+// In case of the end-of-string, a pointer to the closing \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->argIdx);
+ CHECK(p);
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ CHECK(p);
+      if (dir->fieldWidth <= 0)  // Width, if specified, must be positive
+ return 0;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return 0; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between old GNU extension
+ // of %as, %aS and %a[...] and newer POSIX %a followed by
+ // letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+ // Watch for %a[h-j%d], if % appears in the
+ // [...] range, then we need to give up, we don't know
+ // if scanf will parse it as POSIX %a [h-j %d ] or
+ // GNU allocation of string with range dh-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return 0;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int scanf_get_value_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return FSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return FSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
+ bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (dir->fieldWidth == 0) {
+ if (!needsTerminator)
+ return charSize;
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return (dir->fieldWidth + needsTerminator) * charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
+}
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ while (*p) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in scanf interceptor: "
+ "%.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (n_inputs < 0)
+ break;
+ if (size == FSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ } else if (size == FSS_WCSLEN) {
+ // FIXME: actually use wcslen() to calculate it.
+ size = 0;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
+
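A hedged worked example of what scanf_common reports: in the call below, "%d" yields a write range of sizeof(int) bytes at &n, and "%9s" yields (fieldWidth + terminator) * sizeof(char) = 10 bytes at name.

    #include <cstdio>

    int main() {
      int n = 0;
      char name[10];
      std::sscanf("7 carrots", "%d %9s", &n, name);   // write ranges: sizeof(int) at &n, 10 at name
      std::printf("%d %s\n", n, name);
      return 0;
    }
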
+#if SANITIZER_INTERCEPT_PRINTF
+
+struct PrintfDirective {
+ int fieldWidth;
+ int fieldPrecision;
+ int argIdx; // width argument index, or -1 if not specified ("%*n$")
+ int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
+ const char *begin;
+ const char *end;
+ bool starredWidth;
+ bool starredPrecision;
+ char lengthModifier[2];
+ char convSpecifier;
+};
+
+static const char *maybe_parse_number(const char *p, int *out) {
+ if (*p >= '0' && *p <= '9')
+ p = parse_number(p, out);
+ return p;
+}
+
+static const char *maybe_parse_number_or_star(const char *p, int *out,
+ bool *star) {
+ if (*p == '*') {
+ *star = true;
+ ++p;
+ } else {
+ *star = false;
+ p = maybe_parse_number(p, out);
+ }
+ return p;
+}
+
+// Parse printf format string. Same as scanf_parse_next.
+static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+ dir->precisionIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ // Flags
+ while (char_is_one_of(*p, "'-+ #0")) {
+ ++p;
+ }
+ // Field width
+ p = maybe_parse_number_or_star(p, &dir->fieldWidth,
+ &dir->starredWidth);
+ if (!p)
+ return 0;
+ // Precision
+ if (*p == '.') {
+ ++p;
+ // Actual precision is optional (surprise!)
+ p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
+ &dir->starredPrecision);
+ if (!p)
+ return 0;
+ // m$
+ if (dir->starredPrecision) {
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ }
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int printf_get_value_size(PrintfDirective *dir) {
+ if (dir->convSpecifier == 'm') {
+ return sizeof(char *);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS")) {
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (char_is_one_of(dir->convSpecifier, "sS")) {
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
+}
+
+#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
+ do { \
+ if (format_is_float_conv(convSpecifier)) { \
+ switch (size) { \
+ case 8: \
+ va_arg(*aq, double); \
+ break; \
+ case 16: \
+ va_arg(*aq, long double); \
+ break; \
+ default: \
+ Report("WARNING: unexpected floating-point arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } else { \
+ switch (size) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ va_arg(*aq, u32); \
+ break; \
+ case 8: \
+ va_arg(*aq, u64); \
+ break; \
+ default: \
+ Report("WARNING: unexpected arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } \
+ } while (0)
+
+// Common part of *printf interceptors.
+// Process format string and va_list, and report all load ranges.
+static void printf_common(void *ctx, const char *format, va_list aq) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ const char *p = format;
+
+ while (*p) {
+ PrintfDirective dir;
+ p = printf_parse_next(p, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1 || dir.precisionIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.starredWidth) {
+ // Dynamic width
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ if (dir.starredPrecision) {
+ // Dynamic precision
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ int size = printf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ if (dir.convSpecifier == 'n') {
+ void *argp = va_arg(aq, void *);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ continue;
+ } else if (size == FSS_STRLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ if (dir.starredPrecision) {
+ // FIXME: properly support starred precision for strings.
+ size = 0;
+ } else if (dir.fieldPrecision > 0) {
+ // Won't read more than "precision" symbols.
+ size = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (size < dir.fieldPrecision) size++;
+ } else {
+ // Whole string will be accessed.
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else if (size == FSS_WCSLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ // FIXME: Properly support wide-character strings (via wcsrtombs).
+ size = 0;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else {
+ // Skip non-pointer args
+ SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
+ }
+ }
+}
+
+#endif // SANITIZER_INTERCEPT_PRINTF
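
And a hedged worked example for printf_common: string arguments are reported as reads clipped by an explicit precision, and "%n" is the one conversion that produces a write range.

    #include <cstdio>

    int main() {
      const char *s = "sanitizer";
      int written = 0;
      // "%.4s": reads strnlen(s, 4) bytes of s (the terminator is counted only when
      //         the string is shorter than the precision).
      // "%n"  : writes sizeof(int) bytes to &written.
      std::printf("%.4s%n\n", s, &written);
      std::printf("chars before %%n: %d\n", written);
      return 0;
    }
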
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index ac8cdae5b816..96d171a92f57 100755
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -12,14 +12,18 @@
struct ioctl_desc {
unsigned req;
- // FIXME: support read+write arguments. Those are currently marked as WRITE.
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE instead of READWRITE, unless
+ // explicitly noted.
enum {
NONE,
READ,
WRITE,
+ READWRITE,
CUSTOM
- } type : 2;
- unsigned size : 30;
+ } type : 3;
+ unsigned size : 29;
const char* name;
};
@@ -487,11 +491,15 @@ static void ioctl_init() {
// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
- if ((req & ~0x3fff001fU) == IOCTL_EVIOCGBIT)
+ // Strip size and event number.
+ const unsigned kEviocgbitMask =
+ (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
+ if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
return IOCTL_EVIOCGBIT;
- if ((req & ~0x3fU) == IOCTL_EVIOCGABS)
+ // Strip absolute axis number.
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
return IOCTL_EVIOCGABS;
- if ((req & ~0x3fU) == IOCTL_EVIOCSABS)
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
return IOCTL_EVIOCSABS;
#endif
return req;
@@ -513,24 +521,56 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
return 0;
}
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 1024) return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ if (desc->type != IOC_NONE && desc->size == 0) return false;
+ char id = IOC_TYPE(req);
+ // Sanity check.
+ if (!(id >= 'a' && id <= 'z') && !(id >= 'A' && id <= 'Z')) return false;
+ return true;
+}
+
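The direction mapping in ioctl_decode is deliberately inverted relative to the ioctl encoding: _IOR means the kernel writes into user memory, so the argument is treated as a WRITE range, while _IOW means the kernel reads from user memory, hence a READ range. A hedged sketch with made-up request names (not a real driver):

    #include <sys/ioctl.h>

    struct my_stats { int packets; int errors; };
    #define MYDEV_GET_STATS _IOR('M', 0x01, struct my_stats)  // kernel -> user: WRITE range
    #define MYDEV_SET_LIMIT _IOW('M', 0x02, int)              // user -> kernel: READ range

    int main() { return (MYDEV_GET_STATS != MYDEV_SET_LIMIT) ? 0 : 1; }
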
static const ioctl_desc *ioctl_lookup(unsigned req) {
req = ioctl_request_fixup(req);
const ioctl_desc *desc = ioctl_table_lookup(req);
if (desc) return desc;
// Try stripping access size from the request id.
- desc = ioctl_table_lookup(req & ~0x3fff0000U);
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
// Sanity check: requests that encode access size are either read or write and
// have size of 0 in the table.
if (desc && desc->size == 0 &&
- (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ))
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
return desc;
return 0;
}
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::READ) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
}
@@ -548,7 +588,7 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::WRITE) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
// FIXME: add verbose output
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
deleted file mode 100644
index 2660dada2beb..000000000000
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
+++ /dev/null
@@ -1,309 +0,0 @@
-//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Scanf implementation for use in *Sanitizer interceptors.
-// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
-// with a few common GNU extensions.
-//
-//===----------------------------------------------------------------------===//
-#include <stdarg.h>
-
-struct ScanfDirective {
- int argIdx; // argument index, or -1 of not specified ("%n$")
- int fieldWidth;
- bool suppressed; // suppress assignment ("*")
- bool allocate; // allocate space ("m")
- char lengthModifier[2];
- char convSpecifier;
- bool maybeGnuMalloc;
-};
-
-static const char *parse_number(const char *p, int *out) {
- *out = internal_atoll(p);
- while (*p >= '0' && *p <= '9')
- ++p;
- return p;
-}
-
-static bool char_is_one_of(char c, const char *s) {
- return !!internal_strchr(s, c);
-}
-
-// Parse scanf format string. If a valid directive in encountered, it is
-// returned in dir. This function returns the pointer to the first
-// unprocessed character, or 0 in case of error.
-// In case of the end-of-string, a pointer to the closing \0 is returned.
-static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
- ScanfDirective *dir) {
- internal_memset(dir, 0, sizeof(*dir));
- dir->argIdx = -1;
-
- while (*p) {
- if (*p != '%') {
- ++p;
- continue;
- }
- ++p;
- // %%
- if (*p == '%') {
- ++p;
- continue;
- }
- if (*p == '\0') {
- return 0;
- }
- // %n$
- if (*p >= '0' && *p <= '9') {
- int number;
- const char *q = parse_number(p, &number);
- if (*q == '$') {
- dir->argIdx = number;
- p = q + 1;
- }
- // Otherwise, do not change p. This will be re-parsed later as the field
- // width.
- }
- // *
- if (*p == '*') {
- dir->suppressed = true;
- ++p;
- }
- // Field width.
- if (*p >= '0' && *p <= '9') {
- p = parse_number(p, &dir->fieldWidth);
- if (dir->fieldWidth <= 0)
- return 0;
- }
- // m
- if (*p == 'm') {
- dir->allocate = true;
- ++p;
- }
- // Length modifier.
- if (char_is_one_of(*p, "jztLq")) {
- dir->lengthModifier[0] = *p;
- ++p;
- } else if (*p == 'h') {
- dir->lengthModifier[0] = 'h';
- ++p;
- if (*p == 'h') {
- dir->lengthModifier[1] = 'h';
- ++p;
- }
- } else if (*p == 'l') {
- dir->lengthModifier[0] = 'l';
- ++p;
- if (*p == 'l') {
- dir->lengthModifier[1] = 'l';
- ++p;
- }
- }
- // Conversion specifier.
- dir->convSpecifier = *p++;
- // Consume %[...] expression.
- if (dir->convSpecifier == '[') {
- if (*p == '^')
- ++p;
- if (*p == ']')
- ++p;
- while (*p && *p != ']')
- ++p;
- if (*p == 0)
- return 0; // unexpected end of string
- // Consume the closing ']'.
- ++p;
- }
- // This is unfortunately ambiguous between old GNU extension
- // of %as, %aS and %a[...] and newer POSIX %a followed by
- // letters s, S or [.
- if (allowGnuMalloc && dir->convSpecifier == 'a' &&
- !dir->lengthModifier[0]) {
- if (*p == 's' || *p == 'S') {
- dir->maybeGnuMalloc = true;
- ++p;
- } else if (*p == '[') {
- // Watch for %a[h-j%d], if % appears in the
- // [...] range, then we need to give up, we don't know
- // if scanf will parse it as POSIX %a [h-j %d ] or
- // GNU allocation of string with range dh-j plus %.
- const char *q = p + 1;
- if (*q == '^')
- ++q;
- if (*q == ']')
- ++q;
- while (*q && *q != ']' && *q != '%')
- ++q;
- if (*q == 0 || *q == '%')
- return 0;
- p = q + 1; // Consume the closing ']'.
- dir->maybeGnuMalloc = true;
- }
- }
- break;
- }
- return p;
-}
-
-// Returns true if the character is an integer conversion specifier.
-static bool scanf_is_integer_conv(char c) {
- return char_is_one_of(c, "diouxXn");
-}
-
-// Returns true if the character is a floating point conversion specifier.
-static bool scanf_is_float_conv(char c) {
- return char_is_one_of(c, "aAeEfFgG");
-}
-
-// Returns string output character size for string-like conversions,
-// or 0 if the conversion is invalid.
-static int scanf_get_char_size(ScanfDirective *dir) {
- if (char_is_one_of(dir->convSpecifier, "CS")) {
- // wchar_t
- return 0;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cs[")) {
- if (dir->lengthModifier[0] == 'l')
- // wchar_t
- return 0;
- else if (dir->lengthModifier[0] == 0)
- return sizeof(char);
- else
- return 0;
- }
-
- return 0;
-}
-
-enum ScanfStoreSize {
- // Store size not known in advance; can be calculated as strlen() of the
- // destination buffer.
- SSS_STRLEN = -1,
- // Invalid conversion specifier.
- SSS_INVALID = 0
-};
-
-// Returns the store size of a scanf directive (if >0), or a value of
-// ScanfStoreSize.
-static int scanf_get_store_size(ScanfDirective *dir) {
- if (dir->allocate) {
- if (!char_is_one_of(dir->convSpecifier, "cCsS["))
- return SSS_INVALID;
- return sizeof(char *);
- }
-
- if (dir->maybeGnuMalloc) {
- if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
- return SSS_INVALID;
- // This is ambiguous, so check the smaller size of char * (if it is
- // a GNU extension of %as, %aS or %a[...]) and float (if it is
- // POSIX %a followed by s, S or [ letters).
- return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
- }
-
- if (scanf_is_integer_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'h':
- return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
- case 'L':
- return sizeof(long long);
- case 'j':
- return sizeof(INTMAX_T);
- case 'z':
- return sizeof(SIZE_T);
- case 't':
- return sizeof(PTRDIFF_T);
- case 0:
- return sizeof(int);
- default:
- return SSS_INVALID;
- }
- }
-
- if (scanf_is_float_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'L':
- case 'q':
- return sizeof(long double);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long double)
- : sizeof(double);
- case 0:
- return sizeof(float);
- default:
- return SSS_INVALID;
- }
- }
-
- if (char_is_one_of(dir->convSpecifier, "sS[")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return SSS_STRLEN;
- return (dir->fieldWidth + 1) * charSize;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cC")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return charSize;
- return dir->fieldWidth * charSize;
- }
-
- if (dir->convSpecifier == 'p') {
- if (dir->lengthModifier[1] != 0)
- return SSS_INVALID;
- return sizeof(void *);
- }
-
- return SSS_INVALID;
-}
-
-// Common part of *scanf interceptors.
-// Process format string and va_list, and report all store ranges.
-// Stops when "consuming" n_inputs input items.
-static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
- const char *format, va_list aq) {
- CHECK_GT(n_inputs, 0);
- const char *p = format;
-
- while (*p) {
- ScanfDirective dir;
- p = scanf_parse_next(p, allowGnuMalloc, &dir);
- if (!p)
- break;
- if (dir.convSpecifier == 0) {
- // This can only happen at the end of the format string.
- CHECK_EQ(*p, 0);
- break;
- }
- // Here the directive is valid. Do what it says.
- if (dir.argIdx != -1) {
- // Unsupported.
- break;
- }
- if (dir.suppressed)
- continue;
- int size = scanf_get_store_size(&dir);
- if (size == SSS_INVALID)
- break;
- void *argp = va_arg(aq, void *);
- if (dir.convSpecifier != 'n')
- --n_inputs;
- if (n_inputs < 0)
- break;
- if (size == SSS_STRLEN) {
- size = internal_strlen((const char *)argp) + 1;
- }
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- }
-}
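
The deleted file's core job (which continues upstream in a more general format-string checker) is translating each scanf conversion into the number of bytes the matching va_arg target will receive, so the interceptor can mark exactly that much memory as written. Below is a simplified, self-contained sketch of that mapping; StoreSize is an illustration, not the runtime's scanf_get_store_size, and it omits the j/z/t/L/q modifiers and the strlen fallback.

#include <cstddef>
#include <cstdio>
#include <cstring>

// Simplified stand-in: bytes scanf will store for one directive, or 0 if
// the case is not handled by this sketch.
static size_t StoreSize(const char *length_mod, char conv, int field_width) {
  if (std::strchr("diouxXn", conv)) {                 // integer conversions
    if (!std::strcmp(length_mod, "hh")) return sizeof(char);
    if (!std::strcmp(length_mod, "h"))  return sizeof(short);
    if (!std::strcmp(length_mod, "l"))  return sizeof(long);
    if (!std::strcmp(length_mod, "ll")) return sizeof(long long);
    return sizeof(int);
  }
  if (std::strchr("aAeEfFgG", conv)) {                // floating-point conversions
    if (!std::strcmp(length_mod, "l")) return sizeof(double);
    if (!std::strcmp(length_mod, "L")) return sizeof(long double);
    return sizeof(float);
  }
  if (conv == 's')                                    // strings: width + NUL
    return field_width ? (size_t)field_width + 1 : 0; // 0: real code uses strlen later
  return 0;
}

int main() {
  std::printf("%%hhd stores %zu bytes\n", StoreSize("hh", 'd', 0));
  std::printf("%%lf  stores %zu bytes\n", StoreSize("l", 'f', 0));
  std::printf("%%10s stores %zu bytes\n", StoreSize("", 's', 10));
  return 0;
}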
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
index 215a61deab63..d8330630b5f6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
@@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
namespace __sanitizer {
@@ -32,4 +33,10 @@ bool PrintsToTtyCached() {
}
return prints_to_tty;
}
+
+bool ColorizeReports() {
+ const char *flag = common_flags()->color;
+ return internal_strcmp(flag, "always") == 0 ||
+ (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
+}
} // namespace __sanitizer
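
The new ColorizeReports() helper centralizes the decision driven by the color flag added below in sanitizer_flags.cc. Here is a standalone sketch of the same rule, assuming reports go to stderr; the real code caches the tty check in PrintsToTtyCached, and this POSIX-only version does not.

#include <cstdio>
#include <cstring>
#include <unistd.h>

// Colorize if color=always, or if color=auto and the report stream is a tty.
static bool ShouldColorize(const char *color_flag) {
  if (std::strcmp(color_flag, "always") == 0) return true;
  if (std::strcmp(color_flag, "auto") == 0) return isatty(STDERR_FILENO) != 0;
  return false;  // "never" or anything unrecognized
}

int main() {
  std::printf("colorize with color=auto: %d\n", ShouldColorize("auto"));
  return 0;
}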
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
index 75f7d1d1e89d..6be2f51b80ac 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
@@ -232,6 +232,7 @@ POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
@@ -239,6 +240,7 @@ POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(times)(void *tbuf) {}
@@ -384,24 +386,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
-PRE_SYSCALL(capget)(void *header, void *dataptr) {}
+PRE_SYSCALL(capget)(void *header, void *dataptr) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
+ if (res >= 0)
if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
- }
}
PRE_SYSCALL(capset)(void *header, const void *data) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
if (data) PRE_READ(data, __user_cap_data_struct_sz);
}
-POST_SYSCALL(capset)(long res, void *header, const void *data) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
- }
-}
+POST_SYSCALL(capset)(long res, void *header, const void *data) {}
PRE_SYSCALL(personality)(long personality) {}
@@ -494,6 +493,7 @@ POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
@@ -501,6 +501,7 @@ POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (tx) POST_WRITE(tx, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
@@ -918,6 +919,7 @@ POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
@@ -925,6 +927,7 @@ POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@@ -1002,8 +1005,8 @@ PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1017,8 +1020,8 @@ PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1029,8 +1032,8 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1040,8 +1043,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -1051,16 +1054,16 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -2080,6 +2083,7 @@ POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
@@ -2087,6 +2091,7 @@ POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
}
}
+#endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
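
Two patterns recur in the syscall-hook changes above: hooks for syscalls absent from Android are fenced with !SANITIZER_ANDROID, and the *getxattr/*listxattr post-hooks now mark only the bytes the kernel actually produced (the syscall's return value) instead of the whole caller-supplied buffer, skipping the size-probing call entirely. A small sketch of the latter rule; MarkWritten is an illustrative stand-in for POST_WRITE.

#include <cstdio>

static void MarkWritten(void *buf, long n) {
  std::printf("mark %ld bytes at %p\n", n, buf);
}

// getxattr(..., value, size) fills at most `res` bytes, and a size-probing
// call (size == 0) fills nothing, so only `res` bytes are reported as
// written, and only when both size and res are positive.
static void PostGetXattr(long res, void *value, long size) {
  if (size && res > 0)
    if (value) MarkWritten(value, res);
}

int main() {
  char buf[64];
  PostGetXattr(/*res=*/17, buf, sizeof(buf));  // only 17 bytes become defined
  PostGetXattr(/*res=*/17, nullptr, 0);        // size probe: nothing marked
  return 0;
}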
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage.cc b/libsanitizer/sanitizer_common/sanitizer_coverage.cc
index e87b76c00819..b88e9e7ed0a8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_coverage.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage.cc
@@ -9,15 +9,15 @@
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
-// For every function F the compiler injects the following code:
+// For every interesting basic block the compiler injects the following code:
// if (*Guard) {
-// __sanitizer_cov(&F);
+// __sanitizer_cov();
// *Guard = 1;
// }
-// It's fine to call __sanitizer_cov more than once for a given function.
+// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
-// - __sanitizer_cov(pc): record that we've executed a given PC.
+// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
@@ -35,40 +35,103 @@
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
#include "sanitizer_flags.h"
-struct CovData {
- BlockingMutex mu;
- InternalMmapVector<uptr> v;
-};
+atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
+
+// pc_array is the array containing the covered PCs.
+// To make the pc_array thread- and async-signal-safe it has to be large enough.
+// 128M counters "ought to be enough for anybody" (4M on 32-bit).
+// pc_array is allocated with MmapNoReserveOrDie and so it uses only as
+// much RAM as it really needs.
+static const uptr kPcArraySize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+static uptr *pc_array;
+static atomic_uintptr_t pc_array_index;
-static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
-COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
-static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
+static bool cov_sandboxed = false;
+static int cov_fd = kInvalidFd;
+static unsigned int cov_max_block_size = 0;
namespace __sanitizer {
// Simply add the pc into the vector under lock. If the function is called more
// than once for a given PC it will be inserted multiple times, which is fine.
static void CovAdd(uptr pc) {
- BlockingMutexLock lock(&cov_data->mu);
- cov_data->v.push_back(pc);
+ if (!pc_array) return;
+ uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+ CHECK_LT(idx, kPcArraySize);
+ pc_array[idx] = pc;
+}
+
+void CovInit() {
+ pc_array = reinterpret_cast<uptr *>(
+ MmapNoReserveOrDie(sizeof(uptr) * kPcArraySize, "CovInit"));
}
static inline bool CompareLess(const uptr &a, const uptr &b) {
return a < b;
}
+// Block layout for packed file format: header, followed by module name (no
+// trailing zero), followed by data blob.
+struct CovHeader {
+ int pid;
+ unsigned int module_name_length;
+ unsigned int data_length;
+};
+
+static void CovWritePacked(int pid, const char *module, const void *blob,
+ unsigned int blob_size) {
+ CHECK_GE(cov_fd, 0);
+ unsigned module_name_length = internal_strlen(module);
+ CovHeader header = {pid, module_name_length, blob_size};
+
+ if (cov_max_block_size == 0) {
+ // Writing to a file. Just go ahead.
+ internal_write(cov_fd, &header, sizeof(header));
+ internal_write(cov_fd, module, module_name_length);
+ internal_write(cov_fd, blob, blob_size);
+ } else {
+ // Writing to a socket. We want to split the data into appropriately sized
+ // blocks.
+ InternalScopedBuffer<char> block(cov_max_block_size);
+ CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
+ uptr header_size_with_module = sizeof(header) + module_name_length;
+ CHECK_LT(header_size_with_module, cov_max_block_size);
+ unsigned int max_payload_size =
+ cov_max_block_size - header_size_with_module;
+ char *block_pos = block.data();
+ internal_memcpy(block_pos, &header, sizeof(header));
+ block_pos += sizeof(header);
+ internal_memcpy(block_pos, module, module_name_length);
+ block_pos += module_name_length;
+ char *block_data_begin = block_pos;
+ char *blob_pos = (char *)blob;
+ while (blob_size > 0) {
+ unsigned int payload_size = Min(blob_size, max_payload_size);
+ blob_size -= payload_size;
+ internal_memcpy(block_data_begin, blob_pos, payload_size);
+ blob_pos += payload_size;
+ ((CovHeader *)block.data())->data_length = payload_size;
+ internal_write(cov_fd, block.data(),
+ header_size_with_module + payload_size);
+ }
+ }
+}
+
// Dump the coverage on disk.
-void CovDump() {
+static void CovDump() {
+ if (!common_flags()->coverage) return;
#if !SANITIZER_WINDOWS
- BlockingMutexLock lock(&cov_data->mu);
- InternalMmapVector<uptr> &v = cov_data->v;
- InternalSort(&v, v.size(), CompareLess);
- InternalMmapVector<u32> offsets(v.size());
- const uptr *vb = v.data();
- const uptr *ve = vb + v.size();
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
+ if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
+ return;
+ uptr size = atomic_load(&pc_array_index, memory_order_relaxed);
+ InternalSort(&pc_array, size, CompareLess);
+ InternalMmapVector<u32> offsets(size);
+ const uptr *vb = pc_array;
+ const uptr *ve = vb + size;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr mb, me, off, prot;
InternalScopedBuffer<char> module(4096);
InternalScopedBuffer<char> path(4096 * 2);
@@ -77,8 +140,9 @@ void CovDump() {
i++) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue;
+ while (vb < ve && *vb < mb) vb++;
if (vb >= ve) break;
- if (mb <= *vb && *vb < me) {
+ if (*vb < me) {
offsets.clear();
const uptr *old_vb = vb;
CHECK_LE(off, *vb);
@@ -88,24 +152,63 @@ void CovDump() {
offsets.push_back(static_cast<u32>(diff));
}
char *module_name = StripModuleName(module.data());
- internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
- module_name, internal_getpid());
+ if (cov_sandboxed) {
+ CovWritePacked(internal_getpid(), module_name, offsets.data(),
+ offsets.size() * sizeof(u32));
+ VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
+ } else {
+ // One file per module per process.
+ internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
+ module_name, internal_getpid());
+ uptr fd = OpenFile(path.data(), true);
+ if (internal_iserror(fd)) {
+ Report(" CovDump: failed to open %s for writing\n", path.data());
+ } else {
+ internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
+ internal_close(fd);
+ VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
+ vb - old_vb);
+ }
+ }
InternalFree(module_name);
- uptr fd = OpenFile(path.data(), true);
- internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
- internal_close(fd);
- if (common_flags()->verbosity)
- Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
}
}
+ if (cov_fd >= 0)
+ internal_close(cov_fd);
#endif // !SANITIZER_WINDOWS
}
+static void OpenPackedFileForWriting() {
+ CHECK(cov_fd == kInvalidFd);
+ InternalScopedBuffer<char> path(1024);
+ internal_snprintf((char *)path.data(), path.size(), "%zd.sancov.packed",
+ internal_getpid());
+ uptr fd = OpenFile(path.data(), true);
+ if (internal_iserror(fd)) {
+ Report(" Coverage: failed to open %s for writing\n", path.data());
+ Die();
+ }
+ cov_fd = fd;
+}
+
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ if (!args) return;
+ if (!common_flags()->coverage) return;
+ cov_sandboxed = args->coverage_sandboxed;
+ if (!cov_sandboxed) return;
+ cov_fd = args->coverage_fd;
+ cov_max_block_size = args->coverage_max_block_size;
+ if (cov_fd < 0)
+ // Pre-open the file now. The sandbox won't allow us to do it later.
+ OpenPackedFileForWriting();
+}
+
} // namespace __sanitizer
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
- CovAdd(reinterpret_cast<uptr>(pc));
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
+ CovAdd(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { CovInit(); }
} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
new file mode 100644
index 000000000000..949c486a14ac
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
@@ -0,0 +1,410 @@
+//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// The deadlock detector maintains a directed graph of lock acquisitions.
+// When a lock event happens, the detector checks if the locks already held by
+// the current thread are reachable from the newly acquired lock.
+//
+// The detector can handle only a fixed amount of simultaneously live locks
+// (a lock is alive if it has been locked at least once and has not been
+// destroyed). When the maximal number of locks is reached the entire graph
+// is flushed and the new lock epoch is started. The node ids from the old
+// epochs can not be used with any of the detector methods except for
+// nodeBelongsToCurrentEpoch().
+//
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_H
+#define SANITIZER_DEADLOCK_DETECTOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bvgraph.h"
+
+namespace __sanitizer {
+
+// Thread-local state for DeadlockDetector.
+// It contains the locks currently held by the owning thread.
+template <class BV>
+class DeadlockDetectorTLS {
+ public:
+ // No CTOR.
+ void clear() {
+ bv_.clear();
+ epoch_ = 0;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ bool empty() const { return bv_.empty(); }
+
+ void ensureCurrentEpoch(uptr current_epoch) {
+ if (epoch_ == current_epoch) return;
+ bv_.clear();
+ epoch_ = current_epoch;
+ }
+
+ uptr getEpoch() const { return epoch_; }
+
+ // Returns true if this is the first (non-recursive) acquisition of this lock.
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
+ // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
+ CHECK_EQ(epoch_, current_epoch);
+ if (!bv_.setBit(lock_id)) {
+ // The lock is already held by this thread, it must be recursive.
+ CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
+ recursive_locks[n_recursive_locks++] = lock_id;
+ return false;
+ }
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ // lock_id < BV::kSize, can cast to a smaller int.
+ u32 lock_id_short = static_cast<u32>(lock_id);
+ LockWithContext l = {lock_id_short, stk};
+ all_locks_with_contexts_[n_all_locks_++] = l;
+ return true;
+ }
+
+ void removeLock(uptr lock_id) {
+ if (n_recursive_locks) {
+ for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
+ if (recursive_locks[i] == lock_id) {
+ n_recursive_locks--;
+ Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
+ return;
+ }
+ }
+ }
+ // Printf("remLock: %zx %zx\n", lock_id, epoch_);
+ CHECK(bv_.clearBit(lock_id));
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
+ }
+
+ const BV &getLocks(uptr current_epoch) const {
+ CHECK_EQ(epoch_, current_epoch);
+ return bv_;
+ }
+
+ uptr getNumLocks() const { return n_all_locks_; }
+ uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
+
+ private:
+ BV bv_;
+ uptr epoch_;
+ uptr recursive_locks[64];
+ uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
+};
+
+// DeadlockDetector.
+// For deadlock detection to work we need one global DeadlockDetector object
+// and one DeadlockDetectorTLS object per every thread.
+// This class is not thread-safe: most of its methods should be guarded by
+// an external lock unless explicitly stated otherwise.
+template <class BV>
+class DeadlockDetector {
+ public:
+ typedef BV BitVector;
+
+ uptr size() const { return g_.size(); }
+
+ // No CTOR.
+ void clear() {
+ current_epoch_ = 0;
+ available_nodes_.clear();
+ recycled_nodes_.clear();
+ g_.clear();
+ n_edges_ = 0;
+ }
+
+ // Allocate new deadlock detector node.
+ // If we are out of available nodes first try to recycle some.
+ // If there is nothing to recycle, flush the graph and increment the epoch.
+ // Associate 'data' (opaque user's object) with the new node.
+ uptr newNode(uptr data) {
+ if (!available_nodes_.empty())
+ return getAvailableNode(data);
+ if (!recycled_nodes_.empty()) {
+ // Printf("recycling: n_edges_ %zd\n", n_edges_);
+ for (sptr i = n_edges_ - 1; i >= 0; i--) {
+ if (recycled_nodes_.getBit(edges_[i].from) ||
+ recycled_nodes_.getBit(edges_[i].to)) {
+ Swap(edges_[i], edges_[n_edges_ - 1]);
+ n_edges_--;
+ }
+ }
+ CHECK(available_nodes_.empty());
+ // removeEdgesFrom was called in removeNode.
+ g_.removeEdgesTo(recycled_nodes_);
+ available_nodes_.setUnion(recycled_nodes_);
+ recycled_nodes_.clear();
+ return getAvailableNode(data);
+ }
+ // We are out of vacant nodes. Flush and increment the current_epoch_.
+ current_epoch_ += size();
+ recycled_nodes_.clear();
+ available_nodes_.setAll();
+ g_.clear();
+ return getAvailableNode(data);
+ }
+
+ // Get data associated with the node created by newNode().
+ uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
+
+ bool nodeBelongsToCurrentEpoch(uptr node) {
+ return node && (node / size() * size()) == current_epoch_;
+ }
+
+ void removeNode(uptr node) {
+ uptr idx = nodeToIndex(node);
+ CHECK(!available_nodes_.getBit(idx));
+ CHECK(recycled_nodes_.setBit(idx));
+ g_.removeEdgesFrom(idx);
+ }
+
+ void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+ dtls->ensureCurrentEpoch(current_epoch_);
+ }
+
+ // Returns true if there is a cycle in the graph after this lock event.
+ // Ideally should be called before the lock is acquired so that we can
+ // report a deadlock before a real deadlock happens.
+ bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+ }
+
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
+ // Add cur_node to the set of locks held currently by dtls.
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ }
+
+ // Experimental *racy* fast path function.
+ // Returns true if all edges from the currently held locks to cur_node exist.
+ bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ uptr local_epoch = dtls->getEpoch();
+ // Read from current_epoch_ is racy.
+ if (cur_node && local_epoch == current_epoch_ &&
+ local_epoch == nodeToEpoch(cur_node)) {
+ uptr cur_idx = nodeToIndexUnchecked(cur_node);
+ for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
+ if (!g_.hasEdge(dtls->getLock(i), cur_idx))
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Adds edges from currently held locks to cur_node,
+ // returns the number of added edges, and puts the sources of added edges
+ // into added_edges[].
+ // Should be called before onLockAfter.
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
+ int unique_tid) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ uptr added_edges[40];
+ uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
+ added_edges, ARRAY_SIZE(added_edges));
+ for (uptr i = 0; i < n_added_edges; i++) {
+ if (n_edges_ < ARRAY_SIZE(edges_)) {
+ Edge e = {(u16)added_edges[i], (u16)cur_idx,
+ dtls->findLockContext(added_edges[i]), stk,
+ unique_tid};
+ edges_[n_edges_++] = e;
+ }
+ // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
+ // n_edges_, stk, added_edges[i], cur_idx, unique_tid);
+ }
+ return n_added_edges;
+ }
+
+ bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
+ int *unique_tid) {
+ uptr from_idx = nodeToIndex(from_node);
+ uptr to_idx = nodeToIndex(to_node);
+ for (uptr i = 0; i < n_edges_; i++) {
+ if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
+ *stk_from = edges_[i].stk_from;
+ *stk_to = edges_[i].stk_to;
+ *unique_tid = edges_[i].unique_tid;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Test-only function. Handles the before/after lock events,
+ // returns true if there is a cycle.
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
+ addEdges(dtls, cur_node, stk, 0);
+ onLockAfter(dtls, cur_node, stk);
+ return is_reachable;
+ }
+
+ // Handles the try_lock event, returns false.
+ // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+ // to add this lock to the currently held locks, but we should not try to
+ // change the lock graph or to detect a cycle. We may want to investigate
+ // whether a more aggressive strategy is possible for try_lock.
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ return false;
+ }
+
+ // Returns true iff dtls is empty (no locks are currently held) and we can
+ // add the node to the currently held locks w/o changing the global state.
+ // This operation is thread-safe as it only touches the dtls.
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (!dtls->empty()) return false;
+ if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ // Finds a path between the lock 'cur_node' (currently not held in dtls)
+ // and some currently held lock, returns the length of the path
+ // or 0 on failure.
+ uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+ uptr path_size) {
+ tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
+ uptr idx = nodeToIndex(cur_node);
+ CHECK(!tmp_bv_.getBit(idx));
+ uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
+ for (uptr i = 0; i < res; i++)
+ path[i] = indexToNode(path[i]);
+ if (res)
+ CHECK_EQ(path[0], cur_node);
+ return res;
+ }
+
+ // Handle the unlock event.
+ // This operation is thread-safe as it only touches the dtls.
+ void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ if (dtls->getEpoch() == nodeToEpoch(node))
+ dtls->removeLock(nodeToIndexUnchecked(node));
+ }
+
+ // Tries to handle the lock event w/o writing to global state.
+ // Returns true on success.
+ // This operation is thread-safe as it only touches the dtls
+ // (modulo racy nature of hasAllEdges).
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (hasAllEdges(dtls, node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
+ return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
+ }
+
+ uptr testOnlyGetEpoch() const { return current_epoch_; }
+ bool testOnlyHasEdge(uptr l1, uptr l2) {
+ return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+ }
+ // idx1 and idx2 are raw indices to g_, not lock IDs.
+ bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+ return g_.hasEdge(idx1, idx2);
+ }
+
+ void Print() {
+ for (uptr from = 0; from < size(); from++)
+ for (uptr to = 0; to < size(); to++)
+ if (g_.hasEdge(from, to))
+ Printf(" %zx => %zx\n", from, to);
+ }
+
+ private:
+ void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
+
+ void check_node(uptr node) const {
+ CHECK_GE(node, size());
+ CHECK_EQ(current_epoch_, nodeToEpoch(node));
+ }
+
+ uptr indexToNode(uptr idx) const {
+ check_idx(idx);
+ return idx + current_epoch_;
+ }
+
+ uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
+
+ uptr nodeToIndex(uptr node) const {
+ check_node(node);
+ return nodeToIndexUnchecked(node);
+ }
+
+ uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
+
+ uptr getAvailableNode(uptr data) {
+ uptr idx = available_nodes_.getAndClearFirstOne();
+ data_[idx] = data;
+ return indexToNode(idx);
+ }
+
+ struct Edge {
+ u16 from;
+ u16 to;
+ u32 stk_from;
+ u32 stk_to;
+ int unique_tid;
+ };
+
+ uptr current_epoch_;
+ BV available_nodes_;
+ BV recycled_nodes_;
+ BV tmp_bv_;
+ BVGraph<BV> g_;
+ uptr data_[BV::kSize];
+ Edge edges_[BV::kSize * 32];
+ uptr n_edges_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_H
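
The class above delegates the graph work to BVGraph over fixed-size bit vectors; conceptually, each live lock is a node, an edge A->B records "B was acquired while A was held", and onLockBefore() flags a deadlock when the about-to-be-acquired lock can already reach one of the locks the thread holds. Below is a self-contained toy of that check using std::set and a DFS instead of bit vectors, so it illustrates the idea rather than the runtime's data structure.

#include <cstdio>
#include <map>
#include <set>

// Toy lock-order graph: edge a->b means "b was acquired while a was held".
struct LockOrderGraph {
  std::map<int, std::set<int>> edges;

  bool Reachable(int from, int to, std::set<int> *seen) const {
    if (from == to) return true;
    if (!seen->insert(from).second) return false;
    auto it = edges.find(from);
    if (it == edges.end()) return false;
    for (int next : it->second)
      if (Reachable(next, to, seen)) return true;
    return false;
  }

  // Mirrors onLockBefore(): a cycle exists if the new lock can already
  // reach one of the locks the thread currently holds.
  bool WouldDeadlock(const std::set<int> &held, int new_lock) const {
    for (int h : held) {
      std::set<int> seen;
      if (Reachable(new_lock, h, &seen)) return true;
    }
    return false;
  }

  // Mirrors addEdges() + onLockAfter(): record held->new_lock edges.
  void Acquire(std::set<int> *held, int new_lock) {
    for (int h : *held) edges[h].insert(new_lock);
    held->insert(new_lock);
  }
};

int main() {
  LockOrderGraph g;
  std::set<int> t1_held, t2_held;
  g.Acquire(&t1_held, 1); g.Acquire(&t1_held, 2);   // thread 1: lock 1 then 2
  g.Acquire(&t2_held, 2);                           // thread 2: lock 2 ...
  std::printf("deadlock if T2 now takes 1: %d\n", g.WouldDeadlock(t2_held, 1));
  return 0;
}

In the real detector both the held set and the graph rows are bit vectors, so these queries become a few word-wide operations instead of a pointer-chasing DFS.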
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
new file mode 100644
index 000000000000..fc6f5dcfbb4e
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
@@ -0,0 +1,187 @@
+//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on NxN adjacency bit matrix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_deadlock_detector.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+
+namespace __sanitizer {
+
+typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
+
+struct DDPhysicalThread {
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ DeadlockDetectorTLS<DDBV> dd;
+ DDReport rep;
+ bool report_pending;
+};
+
+struct DD : public DDetector {
+ SpinMutex mtx;
+ DeadlockDetector<DDBV> dd;
+ DDFlags flags;
+
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
+ void ReportDeadlock(DDCallback *cb, DDMutex *m);
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags) {
+ dd.clear();
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ return 0;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
+ lt->ctx = ctx;
+ lt->dd.clear();
+ lt->report_pending = false;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ m->id = 0;
+ m->stk = cb->Unwind();
+}
+
+void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
+ if (!dd.nodeBelongsToCurrentEpoch(m->id))
+ m->id = dd.newNode(reinterpret_cast<uptr>(m));
+ dd.ensureCurrentEpoch(&lt->dd);
+}
+
+void DD::MutexBeforeLock(DDCallback *cb,
+ DDMutex *m, bool wlock) {
+ DDLogicalThread *lt = cb->lt;
+ if (lt->dd.empty()) return; // This will be the first lock held by lt.
+ if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (dd.isHeld(&lt->dd, m->id))
+ return; // FIXME: allow this only for recursive locks.
+ if (dd.onLockBefore(&lt->dd, m->id)) {
+ // Actually add this edge now so that we have all the stack traces.
+ dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
+ ReportDeadlock(cb, m);
+ }
+}
+
+void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
+ DDLogicalThread *lt = cb->lt;
+ uptr path[10];
+ uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
+ CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
+ CHECK_EQ(m->id, path[0]);
+ lt->report_pending = true;
+ DDReport *rep = &lt->rep;
+ rep->n = len;
+ for (uptr i = 0; i < len; i++) {
+ uptr from = path[i];
+ uptr to = path[(i + 1) % len];
+ DDMutex *m0 = (DDMutex*)dd.getData(from);
+ DDMutex *m1 = (DDMutex*)dd.getData(to);
+
+ u32 stk_from = -1U, stk_to = -1U;
+ int unique_tid = 0;
+ dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
+ // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
+ // unique_tid);
+ rep->loop[i].thr_ctx = unique_tid;
+ rep->loop[i].mtx_ctx0 = m0->ctx;
+ rep->loop[i].mtx_ctx1 = m1->ctx;
+ rep->loop[i].stk[0] = stk_to;
+ rep->loop[i].stk[1] = stk_from;
+ }
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+ DDLogicalThread *lt = cb->lt;
+ u32 stk = 0;
+ if (flags.second_deadlock_stack)
+ stk = cb->Unwind();
+ // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
+ if (dd.onFirstLock(&lt->dd, m->id, stk))
+ return;
+ if (dd.onLockFast(&lt->dd, m->id, stk))
+ return;
+
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (wlock) // Only a recursive rlock may be held.
+ CHECK(!dd.isHeld(&lt->dd, m->id));
+ if (!trylock)
+ dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
+ dd.onLockAfter(&lt->dd, m->id, stk);
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
+ dd.onUnlock(&cb->lt->dd, m->id);
+}
+
+void DD::MutexDestroy(DDCallback *cb,
+ DDMutex *m) {
+ if (!m->id) return;
+ SpinMutexLock lk(&mtx);
+ if (dd.nodeBelongsToCurrentEpoch(m->id))
+ dd.removeNode(m->id);
+ m->id = 0;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->lt->report_pending)
+ return 0;
+ cb->lt->report_pending = false;
+ return &cb->lt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
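
DD::MutexAfterLock above is organized as a sequence of progressively more expensive attempts: a purely thread-local path for the first lock, a racy read-only check that all needed edges already exist, and only then the global spin lock. A schematic sketch of that ordering with hypothetical stand-in predicates (placeholders, not the detector's real signatures):

#include <cstdio>

// Hypothetical stand-ins for the three tiers of DD::MutexAfterLock.
static bool TryFirstLockFastPath() { return false; }  // thread already held locks
static bool TryAllEdgesFastPath()  { return false; }  // some held->new edge missing
static void SlowPathUnderSpinLock() {                 // SpinMutexLock(&mtx) in real code
  std::puts("slow path: allocate id if needed, add edges, record the lock");
}

static void OnMutexAcquired() {
  if (TryFirstLockFastPath()) return;  // touches only DeadlockDetectorTLS
  if (TryAllEdgesFastPath())  return;  // racy read of the shared graph, no lock taken
  SlowPathUnderSpinLock();             // the only tier that mutates global state
}

int main() {
  OnMutexAcquired();
  return 0;
}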
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
new file mode 100644
index 000000000000..f4d46d99736d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
@@ -0,0 +1,427 @@
+//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on adjacency lists.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+
+namespace __sanitizer {
+
+const int kMaxNesting = 64;
+const u32 kNoId = -1;
+const u32 kEndId = -2;
+const int kMaxLink = 8;
+const int kL1Size = 1024;
+const int kL2Size = 1024;
+const int kMaxMutex = kL1Size * kL2Size;
+
+struct Id {
+ u32 id;
+ u32 seq;
+
+ explicit Id(u32 id = 0, u32 seq = 0)
+ : id(id)
+ , seq(seq) {
+ }
+};
+
+struct Link {
+ u32 id;
+ u32 seq;
+ u32 tid;
+ u32 stk0;
+ u32 stk1;
+
+ explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
+ : id(id)
+ , seq(seq)
+ , tid(tid)
+ , stk0(s0)
+ , stk1(s1) {
+ }
+};
+
+struct DDPhysicalThread {
+ DDReport rep;
+ bool report_pending;
+ bool visited[kMaxMutex];
+ Link pending[kMaxMutex];
+ Link path[kMaxMutex];
+};
+
+struct ThreadMutex {
+ u32 id;
+ u32 stk;
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ ThreadMutex locked[kMaxNesting];
+ int nlocked;
+};
+
+struct Mutex {
+ StaticSpinMutex mtx;
+ u32 seq;
+ int nlink;
+ Link link[kMaxLink];
+};
+
+struct DD : public DDetector {
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
+ void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
+ u32 allocateId(DDCallback *cb);
+ Mutex *getMutex(u32 id);
+ u32 getMutexId(Mutex *m);
+
+ DDFlags flags;
+
+ Mutex* mutex[kL1Size];
+
+ SpinMutex mtx;
+ InternalMmapVector<u32> free_id;
+ int id_gen;
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags)
+ , free_id(1024) {
+ id_gen = 0;
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
+ "deadlock detector (physical thread)");
+ return pt;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+ pt->~DDPhysicalThread();
+ UnmapOrDie(pt, sizeof(DDPhysicalThread));
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
+ sizeof(DDLogicalThread));
+ lt->ctx = ctx;
+ lt->nlocked = 0;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
+ m->id = kNoId;
+ m->recursion = 0;
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+}
+
+Mutex *DD::getMutex(u32 id) {
+ return &mutex[id / kL2Size][id % kL2Size];
+}
+
+u32 DD::getMutexId(Mutex *m) {
+ for (int i = 0; i < kL1Size; i++) {
+ Mutex *tab = mutex[i];
+ if (tab == 0)
+ break;
+ if (m >= tab && m < tab + kL2Size)
+ return i * kL2Size + (m - tab);
+ }
+ return -1;
+}
+
+u32 DD::allocateId(DDCallback *cb) {
+ u32 id = -1;
+ SpinMutexLock l(&mtx);
+ if (free_id.size() > 0) {
+ id = free_id.back();
+ free_id.pop_back();
+ } else {
+ CHECK_LT(id_gen, kMaxMutex);
+ if ((id_gen % kL2Size) == 0) {
+ mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
+ "deadlock detector (mutex table)");
+ }
+ id = id_gen++;
+ }
+ CHECK_LE(id, kMaxMutex);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n",
+ cb->lt->ctx, id);
+ return id;
+}
+
+void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDPhysicalThread *pt = cb->pt;
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+
+ // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+ if (lt->nlocked == 1) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ bool added = false;
+ Mutex *mtx = getMutex(m->id);
+ for (int i = 0; i < lt->nlocked - 1; i++) {
+ u32 id1 = lt->locked[i].id;
+ u32 stk1 = lt->locked[i].stk;
+ Mutex *mtx1 = getMutex(id1);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->nlink == kMaxLink) {
+ // FIXME(dvyukov): check stale links
+ continue;
+ }
+ int li = 0;
+ for (; li < mtx1->nlink; li++) {
+ Link *link = &mtx1->link[li];
+ if (link->id == m->id) {
+ if (link->seq != mtx->seq) {
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ break;
+ }
+ }
+ if (li == mtx1->nlink) {
+ // FIXME(dvyukov): check stale links
+ Link *link = &mtx1->link[mtx1->nlink++];
+ link->id = m->id;
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ }
+
+ if (!added || mtx->nlink == 0) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CycleCheck(pt, lt, m);
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {
+ VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
+ CHECK(wlock);
+ m->recursion++;
+ return;
+ }
+ CHECK_EQ(owner, 0);
+ if (wlock) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
+ CHECK_EQ(m->recursion, 0);
+ m->recursion = 1;
+ atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
+ }
+
+ if (!trylock)
+ return;
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
+ if (--m->recursion > 0)
+ return;
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+ }
+ CHECK_NE(m->id, kNoId);
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (cb->lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+}
+
+void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
+ cb->lt->ctx, m);
+ DDLogicalThread *lt = cb->lt;
+
+ if (m->id == kNoId)
+ return;
+
+ // Remove the mutex from lt->locked if there.
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+
+ // Clear and invalidate the mutex descriptor.
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ mtx->seq++;
+ mtx->nlink = 0;
+ }
+
+ // Return id to cache.
+ {
+ SpinMutexLock l(&mtx);
+ free_id.push_back(m->id);
+ }
+}
+
+void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
+ DDMutex *m) {
+ internal_memset(pt->visited, 0, sizeof(pt->visited));
+ int npath = 0;
+ int npending = 0;
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ for (int li = 0; li < mtx->nlink; li++)
+ pt->pending[npending++] = mtx->link[li];
+ }
+ while (npending > 0) {
+ Link link = pt->pending[--npending];
+ if (link.id == kEndId) {
+ npath--;
+ continue;
+ }
+ if (pt->visited[link.id])
+ continue;
+ Mutex *mtx1 = getMutex(link.id);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->seq != link.seq)
+ continue;
+ pt->visited[link.id] = true;
+ if (mtx1->nlink == 0)
+ continue;
+ pt->path[npath++] = link;
+ pt->pending[npending++] = Link(kEndId);
+ if (link.id == m->id)
+ return Report(pt, lt, npath); // Bingo!
+ for (int li = 0; li < mtx1->nlink; li++) {
+ Link *link1 = &mtx1->link[li];
+ // Mutex *mtx2 = getMutex(link->id);
+ // FIXME(dvyukov): fast seq check
+ // FIXME(dvyukov): fast nlink != 0 check
+ // FIXME(dvyukov): fast pending check?
+ // FIXME(dvyukov): npending can be larger than kMaxMutex
+ pt->pending[npending++] = *link1;
+ }
+ }
+}
+
+void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
+ DDReport *rep = &pt->rep;
+ rep->n = npath;
+ for (int i = 0; i < npath; i++) {
+ Link *link = &pt->path[i];
+ Link *link0 = &pt->path[i ? i - 1 : npath - 1];
+ rep->loop[i].thr_ctx = link->tid;
+ rep->loop[i].mtx_ctx0 = link0->id;
+ rep->loop[i].mtx_ctx1 = link->id;
+ rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
+ rep->loop[i].stk[1] = link->stk1;
+ }
+ pt->report_pending = true;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->pt->report_pending)
+ return 0;
+ cb->pt->report_pending = false;
+ return &cb->pt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
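
CycleCheck above walks the adjacency lists iteratively: a pending stack holds links still to explore, kEndId entries act as "pop the path" markers, and encountering the starting mutex again means the would-be acquisition closes a cycle. Below is a compact, self-contained toy of the same traversal over plain integer ids; it omits the real Mutex/Link structures and the seq-based staleness checks.

#include <cstdio>
#include <vector>

static const int kEndId = -2;  // path marker, as in the real implementation

// Returns the cycle length if a walk from `start`'s successors reaches
// `start` again, or 0 if no such cycle exists.
static int CycleLengthThrough(const std::vector<std::vector<int>> &links,
                              int start) {
  std::vector<bool> visited(links.size(), false);
  std::vector<int> pending(links[start].begin(), links[start].end());
  int path_len = 0;
  while (!pending.empty()) {
    int id = pending.back();
    pending.pop_back();
    if (id == kEndId) { path_len--; continue; }  // done with this branch
    if (visited[id]) continue;
    visited[id] = true;
    path_len++;
    pending.push_back(kEndId);                   // marks where this subtree ends
    if (id == start) return path_len;            // reached the start: cycle found
    for (int next : links[id]) pending.push_back(next);
  }
  return 0;
}

int main() {
  // Lock-order edges 0 -> 1 -> 2 -> 0 form a cycle of length 3.
  std::vector<std::vector<int>> links = {{1}, {2}, {0}};
  std::printf("cycle length through lock 0: %d\n", CycleLengthThrough(links, 0));
  return 0;
}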
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
new file mode 100644
index 000000000000..59d8d93fe133
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -0,0 +1,91 @@
+//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// Abstract deadlock detector interface.
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
+# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
+#endif
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+// dd - deadlock detector.
+// lt - logical (user) thread.
+// pt - physical (OS) thread.
+
+struct DDPhysicalThread;
+struct DDLogicalThread;
+
+struct DDMutex {
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+ uptr id;
+ u32 stk; // creation stack
+#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+ u32 id;
+ u32 recursion;
+ atomic_uintptr_t owner;
+#else
+# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
+#endif
+ u64 ctx;
+};
+
+struct DDFlags {
+ bool second_deadlock_stack;
+};
+
+struct DDReport {
+ enum { kMaxLoopSize = 8 };
+ int n; // number of entries in loop
+ struct {
+ u64 thr_ctx; // user thread context
+ u64 mtx_ctx0; // user mutex context, start of the edge
+ u64 mtx_ctx1; // user mutex context, end of the edge
+ u32 stk[2]; // stack ids for the edge
+ } loop[kMaxLoopSize];
+};
+
+struct DDCallback {
+ DDPhysicalThread *pt;
+ DDLogicalThread *lt;
+
+ virtual u32 Unwind() { return 0; }
+ virtual int UniqueTid() { return 0; }
+};
+
+struct DDetector {
+ static DDetector *Create(const DDFlags *flags);
+
+ virtual DDPhysicalThread* CreatePhysicalThread() { return 0; }
+ virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
+
+ virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; }
+ virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
+
+ virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
+ virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {}
+ virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
+
+ virtual DDReport *GetReport(DDCallback *cb) { return 0; }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
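
The header above is all a tool needs to compile against: a DDMutex blob to embed in its own mutex descriptor, a DDCallback through which the detector asks the tool for stack ids and thread ids, and virtual hooks to call around each lock operation. The toy below mirrors that hook protocol around a std::mutex with a stub detector that only counts events; ToyDetector and InstrumentedMutex are illustrative names, not sanitizer types, and the real detectors are the two .cc files above.

#include <cstdio>
#include <mutex>

// Illustrative stand-ins mirroring the DDetector hook protocol.
struct ToyMutexCtx { unsigned long id = 0; };   // plays the role of DDMutex

struct ToyDetector {
  int events = 0;
  void MutexBeforeLock(ToyMutexCtx *, bool /*wlock*/) { events++; }
  void MutexAfterLock(ToyMutexCtx *, bool /*wlock*/, bool /*trylock*/) { events++; }
  void MutexBeforeUnlock(ToyMutexCtx *, bool /*wlock*/) { events++; }
  const char *GetReport() { return nullptr; }   // no deadlock in this stub
};

// "Before" runs ahead of blocking (so a cycle can be reported before the
// program actually deadlocks), "after" runs once the lock is owned.
struct InstrumentedMutex {
  std::mutex m;
  ToyMutexCtx ctx;
  ToyDetector *dd = nullptr;
  void lock() {
    dd->MutexBeforeLock(&ctx, /*wlock=*/true);
    m.lock();
    dd->MutexAfterLock(&ctx, /*wlock=*/true, /*trylock=*/false);
  }
  void unlock() {
    dd->MutexBeforeUnlock(&ctx, /*wlock=*/true);
    m.unlock();
  }
};

int main() {
  ToyDetector dd;
  InstrumentedMutex mu;
  mu.dd = &dd;
  mu.lock();
  mu.unlock();
  std::printf("detector saw %d hook calls\n", dd.events);
  return 0;
}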
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.cc b/libsanitizer/sanitizer_common/sanitizer_flags.cc
index 90bb57dff980..e90d79e43602 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.cc
@@ -13,12 +13,24 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
namespace __sanitizer {
+CommonFlags common_flags_dont_use;
+
+struct FlagDescription {
+ const char *name;
+ const char *description;
+ FlagDescription *next;
+};
+
+IntrusiveList<FlagDescription> flag_descriptions;
+
void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
+ f->allow_addr2line = false;
f->strip_path_prefix = "";
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
@@ -26,26 +38,96 @@ void SetCommonFlagsDefaults(CommonFlags *f) {
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
- f->detect_leaks = false;
+ f->detect_leaks = true;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
+ f->check_printf = true;
+ // TODO(glider): tools may want to set different defaults for handle_segv.
+ f->handle_segv = SANITIZER_NEEDS_SEGV;
+ f->allow_user_segv_handler = false;
+ f->use_sigaltstack = true;
+ f->detect_deadlocks = false;
+ f->clear_shadow_mmap_threshold = 64 * 1024;
+ f->color = "auto";
+ f->legacy_pthread_cond = false;
+ f->intercept_tls_get_addr = false;
+ f->coverage = false;
+ f->full_address_space = false;
}
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
- ParseFlag(str, &f->symbolize, "symbolize");
- ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
- ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
- ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
- ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
- ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
- ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
- ParseFlag(str, &f->log_path, "log_path");
- ParseFlag(str, &f->verbosity, "verbosity");
- ParseFlag(str, &f->detect_leaks, "detect_leaks");
- ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
- ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
- ParseFlag(str, &f->print_summary, "print_summary");
+ ParseFlag(str, &f->symbolize, "symbolize",
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.");
+ ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.");
+ ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable.");
+ ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
+ "Strips this prefix from file paths in error reports.");
+ ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.");
+ ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.");
+ ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
+ "Intercept and handle ioctl requests.");
+ ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
+ "Max number of stack frames kept for each allocation/deallocation.");
+ ParseFlag(str, &f->log_path, "log_path",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".");
+ ParseFlag(str, &f->verbosity, "verbosity",
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
+ ParseFlag(str, &f->detect_leaks, "detect_leaks",
+ "Enable memory leak detection.");
+ ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.");
+ ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.");
+ ParseFlag(str, &f->print_summary, "print_summary",
+ "If false, disable printing error summaries in addition to error "
+ "reports.");
+ ParseFlag(str, &f->check_printf, "check_printf",
+ "Check printf arguments.");
+ ParseFlag(str, &f->handle_segv, "handle_segv",
+ "If set, registers the tool's custom SEGV handler (both SIGBUS and "
+ "SIGSEGV on OSX).");
+ ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
+ "If set, allows user to register a SEGV handler even if the tool "
+ "registers one.");
+ ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
+ "If set, uses alternate stack for signal handling.");
+ ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
+ "If set, deadlock detection is enabled.");
+ ParseFlag(str, &f->clear_shadow_mmap_threshold,
+ "clear_shadow_mmap_threshold",
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.");
+ ParseFlag(str, &f->color, "color",
+ "Colorize reports: (always|never|auto).");
+ ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.");
+ ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
+ "Intercept __tls_get_addr.");
+ ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
+ ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+      "not a user-facing flag, used mostly for testing the tools.");
+ ParseFlag(str, &f->coverage, "coverage",
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).");
+ ParseFlag(str, &f->full_address_space, "full_address_space",
+ "Sanitize complete address space; "
+ "by default kernel area on 32-bit platforms will not be sanitized");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
@@ -100,9 +182,40 @@ static bool StartsWith(const char *flag, int flag_length, const char *value) {
(0 == internal_strncmp(flag, value, value_length));
}
-void ParseFlag(const char *env, bool *flag, const char *name) {
+static LowLevelAllocator allocator_for_flags;
+
+// The linear scan is suboptimal, but the number of flags is relatively small.
+bool FlagInDescriptionList(const char *name) {
+ IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
+ while (it.hasNext()) {
+ if (!internal_strcmp(it.next()->name, name)) return true;
+ }
+ return false;
+}
+
+void AddFlagDescription(const char *name, const char *description) {
+ if (FlagInDescriptionList(name)) return;
+ FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
+ new_description->name = name;
+ new_description->description = description;
+ flag_descriptions.push_back(new_description);
+}
+
+// TODO(glider): put the descriptions inside CommonFlags.
+void PrintFlagDescriptions() {
+ IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ while (it.hasNext()) {
+ FlagDescription *descr = it.next();
+ Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
+ }
+}
+
+void ParseFlag(const char *env, bool *flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
if (StartsWith(value, value_length, "0") ||
@@ -115,19 +228,31 @@ void ParseFlag(const char *env, bool *flag, const char *name) {
*flag = true;
}
-void ParseFlag(const char *env, int *flag, const char *name) {
+void ParseFlag(const char *env, int *flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<int>(internal_atoll(value));
}
-static LowLevelAllocator allocator_for_flags;
+void ParseFlag(const char *env, uptr *flag,
+ const char *name, const char *descr) {
+ const char *value;
+ int value_length;
+ AddFlagDescription(name, descr);
+ if (!GetFlagValue(env, name, &value, &value_length))
+ return;
+ *flag = static_cast<uptr>(internal_atoll(value));
+}
-void ParseFlag(const char *env, const char **flag, const char *name) {
+void ParseFlag(const char *env, const char **flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
// Copy the flag value. Don't use locks here, as flags are parsed at
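
The rewritten ParseFlag calls above pass a description string that gets recorded in a global list, so that help=1 can later dump every known flag through PrintFlagDescriptions(). A minimal standalone sketch of the same pattern, using the standard library instead of the runtime's LowLevelAllocator and intrusive list (the parsing and names below are simplified, not the runtime's actual code):

    // flag_registry_sketch.cc -- "register the description while parsing" pattern.
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <string>
    #include <vector>

    struct FlagDescription { const char *name; const char *description; };
    static std::vector<FlagDescription> flag_descriptions;

    // Extract "name=value" from an options string like "verbosity=2:help=1".
    static bool GetFlagValue(const char *env, const char *name, std::string *value) {
      if (!env) return false;
      std::string key = std::string(name) + "=";
      const char *p = strstr(env, key.c_str());
      if (!p) return false;
      p += key.size();
      const char *end = strchr(p, ':');
      value->assign(p, end ? (size_t)(end - p) : strlen(p));
      return true;
    }

    static void ParseFlag(const char *env, int *flag, const char *name,
                          const char *descr) {
      // Registered whether or not the flag appears in the string; the real code
      // also de-duplicates repeated registrations.
      flag_descriptions.push_back({name, descr});
      std::string value;
      if (GetFlagValue(env, name, &value)) *flag = atoi(value.c_str());
    }

    static void PrintFlagDescriptions() {
      printf("Available flags:\n");
      for (const FlagDescription &d : flag_descriptions)
        printf("\t%s\n\t\t- %s\n", d.name, d.description);
    }

    int main() {
      const char *opts = "verbosity=2:help=1";
      int verbosity = 0, help = 0;
      ParseFlag(opts, &verbosity, "verbosity", "Verbosity level.");
      ParseFlag(opts, &help, "help", "Print the flag descriptions.");
      if (help) PrintFlagDescriptions();
      printf("verbosity=%d\n", verbosity);
      return 0;
    }
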
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.h b/libsanitizer/sanitizer_common/sanitizer_flags.h
index 46ec09281936..107b6776b3a4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.h
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.h
@@ -16,51 +16,53 @@
namespace __sanitizer {
-void ParseFlag(const char *env, bool *flag, const char *name);
-void ParseFlag(const char *env, int *flag, const char *name);
-void ParseFlag(const char *env, const char **flag, const char *name);
+void ParseFlag(const char *env, bool *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, int *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, uptr *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, const char **flag,
+ const char *name, const char *descr);
struct CommonFlags {
- // If set, use the online symbolizer from common sanitizer runtime.
bool symbolize;
- // Path to external symbolizer. If it is NULL, symbolizer will be looked for
- // in PATH. If it is empty, external symbolizer will not be started.
const char *external_symbolizer_path;
- // Strips this prefix from file paths in error reports.
+ bool allow_addr2line;
const char *strip_path_prefix;
- // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
bool fast_unwind_on_fatal;
- // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
- // Intercept and handle ioctl requests.
bool handle_ioctl;
- // Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
- // Write logs to "log_path.pid".
- // The special values are "stdout" and "stderr".
- // The default is "stderr".
const char *log_path;
- // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
- // Enable memory leak detection.
bool detect_leaks;
- // Invoke leak checking in an atexit handler. Has no effect if
- // detect_leaks=false, or if __lsan_do_leak_check() is called before the
- // handler has a chance to run.
bool leak_check_at_exit;
- // If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
- // If false, disable printing error summaries in addition to error reports.
bool print_summary;
+ bool check_printf;
+ bool handle_segv;
+ bool allow_user_segv_handler;
+ bool use_sigaltstack;
+ bool detect_deadlocks;
+ uptr clear_shadow_mmap_threshold;
+ const char *color;
+ bool legacy_pthread_cond;
+ bool intercept_tls_get_addr;
+ bool help;
+ uptr mmap_limit_mb;
+ bool coverage;
+ bool full_address_space;
};
inline CommonFlags *common_flags() {
- static CommonFlags f;
- return &f;
+ extern CommonFlags common_flags_dont_use;
+ return &common_flags_dont_use;
}
void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
+void PrintFlagDescriptions();
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_interception.h b/libsanitizer/sanitizer_common/sanitizer_interception.h
new file mode 100644
index 000000000000..2346fa859b10
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_interception.h
@@ -0,0 +1,23 @@
+//===-- sanitizer_interception.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common macro definitions for interceptors.
+// Always use this header instead of interception/interception.h.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERCEPTION_H
+#define SANITIZER_INTERCEPTION_H
+
+#include "interception/interception.h"
+#include "sanitizer_common.h"
+
+#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+#undef REAL
+#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x))
+#endif
+
+#endif // SANITIZER_INTERCEPTION_H
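
With this wrapper header, REAL(x) on Linux no longer calls the stored pointer to the intercepted function directly: it is first passed through IndirectExternCall(), which gives the runtime one choke point for every call into the "real" implementation. A toy model of that indirection; the hook and the intercepted function are invented for the sketch, and only the REAL/PTR_TO_REAL shape mirrors the patch:

    // indirect_call_sketch.cc -- route every REAL(func) call through one hook.
    #include <cstdio>

    // Optional hook a tool may install to observe (or translate) real calls.
    static void (*on_indirect_call)() = nullptr;

    template <typename F>
    F IndirectExternCall(F f) {
      if (on_indirect_call) on_indirect_call();
      return f;  // a real runtime could return a translated pointer here
    }

    // Stand-in for a function the tool pretends to have intercepted.
    static int real_square(int x) { return x * x; }
    #define PTR_TO_REAL(x) real_##x
    #define REAL(x) IndirectExternCall(PTR_TO_REAL(x))

    static void log_call() { printf("indirect call taken\n"); }

    int main() {
      on_indirect_call = log_call;
      printf("REAL(square)(7) = %d\n", REAL(square)(7));
      return 0;
    }
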
diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
index 0dab7c2f6c7d..a925d306b618 100644
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -32,10 +32,9 @@
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
-#if __LP64__ || defined(_WIN64)
-# define SANITIZER_WORDSIZE 64
-#else
-# define SANITIZER_WORDSIZE 32
+// If set, the tool will install its own SEGV signal handler.
+#ifndef SANITIZER_NEEDS_SEGV
+# define SANITIZER_NEEDS_SEGV 1
#endif
// GCC does not understand __has_feature
@@ -57,7 +56,7 @@ typedef unsigned long uptr; // NOLINT
typedef signed long sptr; // NOLINT
#endif // defined(_WIN64)
#if defined(__x86_64__)
-// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
+// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// 64-bit pointer to unwind stack frame.
typedef unsigned long long uhwptr; // NOLINT
#else
@@ -97,11 +96,15 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
- // Notify the tools that the sandbox is going to be turned on. The reserved
- // parameter will be used in the future to hold a structure with functions
- // that the tools may call to bypass the sandbox.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_sandbox_on_notify(void *reserved);
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@@ -110,12 +113,16 @@ extern "C" {
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
} // extern "C"
@@ -141,8 +148,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
-# define UNUSED
-# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
@@ -157,8 +162,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
-# define UNUSED __attribute__((unused))
-# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetchnt0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
@@ -167,6 +170,14 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#else
+# define UNUSED
+# define USED
+#endif
+
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@@ -197,7 +208,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
- if (!(expr)) { \
+ if (UNLIKELY(!(expr))) { \
RawWrite(msg); \
Die(); \
} \
@@ -209,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
do { \
__sanitizer::u64 v1 = (u64)(c1); \
__sanitizer::u64 v2 = (u64)(c2); \
- if (!(v1 op v2)) \
+ if (UNLIKELY(!(v1 op v2))) \
__sanitizer::CheckFailed(__FILE__, __LINE__, \
"(" #c1 ") " #op " (" #c2 ")", v1, v2); \
} while (false) \
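
Wrapping the conditions in UNLIKELY() marks the failure branch of these check macros as cold via __builtin_expect, so the common no-failure path stays compact and well predicted. A self-contained sketch of the same macro shape, with made-up names:

    // check_macro_sketch.cc -- cold failure path via __builtin_expect.
    #include <cstdio>
    #include <cstdlib>

    #if defined(__GNUC__) || defined(__clang__)
    # define UNLIKELY(x) __builtin_expect(!!(x), 0)
    #else
    # define UNLIKELY(x) (x)
    #endif

    #define MY_CHECK_MSG(expr, msg)                                   \
      do {                                                            \
        if (UNLIKELY(!(expr))) {  /* failure branch is marked cold */ \
          fprintf(stderr, "Check failed: %s\n", msg);                 \
          abort();                                                    \
        }                                                             \
      } while (false)

    int main(int argc, char **argv) {
      (void)argv;
      MY_CHECK_MSG(argc >= 1, "argc must be at least 1");
      printf("check passed\n");
      return 0;
    }
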
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 53c87555092b..c13a66d88cde 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -16,7 +16,7 @@ namespace __sanitizer {
// Make the compiler think that something is going on there.
static inline void break_optimization(void *arg) {
-#if SANITIZER_WINDOWS
+#if _MSC_VER
// FIXME: make sure this is actually enough.
__asm;
#else
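
break_optimization() only needs to convince the compiler that memory it cannot see may change, which is why the guard switches from the OS macro SANITIZER_WINDOWS to the compiler macro _MSC_VER. A sketch of such a barrier, assuming an empty asm statement with a memory clobber is enough on GCC/Clang (the non-GCC fallback below is illustrative, not the runtime's code):

    // barrier_sketch.cc -- keep the compiler from folding away surrounding code.
    #include <cstdio>

    static inline void break_optimization(void *arg) {
    #if defined(__GNUC__) || defined(__clang__)
      // An empty asm that takes the pointer and clobbers "memory": the compiler
      // must assume *arg may be read or written here.
      __asm__ __volatile__("" : : "r"(arg) : "memory");
    #else
      // Fallback: a volatile access also defeats most folding.
      *(volatile char *)arg = *(volatile char *)arg;
    #endif
    }

    int main() {
      int counter = 0;
      for (int i = 0; i < 1000; i++) {
        counter += i;
        break_optimization(&counter);  // forces counter to live in memory each pass
      }
      printf("%d\n", counter);
      return 0;
    }
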
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index ae23bc45e9dd..fd03b791c58e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -89,12 +89,16 @@ uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid();
uptr internal_getppid();
+int internal_fork();
+
// Threading
uptr internal_sched_yield();
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);
+int internal_sigaction(int signum, const void *act, void *oldact);
+
} // namespace __sanitizer
#endif // SANITIZER_LIBC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cc b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
index 310e811df1da..c88550dec8d8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
@@ -74,9 +74,10 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
loaded = true;
if (lib->loaded)
continue;
- if (common_flags()->verbosity)
- Report("Matched called_from_lib suppression '%s' against library"
- " '%s'\n", lib->templ, module.data());
+ VReport(1,
+ "Matched called_from_lib suppression '%s' against library"
+ " '%s'\n",
+ lib->templ, module.data());
lib->loaded = true;
lib->name = internal_strdup(module.data());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
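
Call sites like the one above now use VReport(level, ...) instead of open-coded "if (common_flags()->verbosity)" checks. A minimal sketch of a verbosity-gated logger of that shape (a plain global integer stands in for the flags object, and vfprintf for the runtime's printer):

    // vreport_sketch.cc -- verbosity-gated logging in one variadic function.
    #include <cstdarg>
    #include <cstdio>

    static int verbosity = 0;  // stand-in for common_flags()->verbosity

    static void VReport(int level, const char *format, ...) {
      if (verbosity < level) return;  // suppressed below this level
      va_list args;
      va_start(args, format);
      vfprintf(stderr, format, args);
      va_end(args);
    }

    int main() {
      VReport(1, "not shown: verbosity is %d\n", verbosity);
      verbosity = 2;
      VReport(1, "Matched suppression '%s' against library '%s'\n",
              "foo.so", "/lib/foo.so");
      return 0;
    }
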
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 69c9c1063f00..faa85acd696e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -11,9 +11,10 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
@@ -23,7 +24,10 @@
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
+#if !SANITIZER_FREEBSD
#include <asm/param.h>
+#endif
+
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
@@ -42,10 +46,25 @@
#include <unistd.h>
#include <unwind.h>
+#if SANITIZER_FREEBSD
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+#endif // SANITIZER_FREEBSD
+
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
+#if SANITIZER_ANDROID
+#include <android/log.h>
+#include <sys/system_properties.h>
+#endif
+
+#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@@ -55,11 +74,12 @@ struct kernel_timeval {
// <linux/futex.h> is broken on some linux distributions.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;
+#endif // SANITIZER_LINUX
-// Are we using 32-bit or 64-bit syscalls?
+// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@@ -67,7 +87,7 @@ const int FUTEX_WAKE = 1;
namespace __sanitizer {
-#ifdef __x86_64__
+#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
#else
#include "sanitizer_syscall_generic.inc"
@@ -76,28 +96,38 @@ namespace __sanitizer {
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
- return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+ return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+ offset);
#endif
}
uptr internal_munmap(void *addr, uptr length) {
- return internal_syscall(__NR_munmap, (uptr)addr, length);
+ return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
uptr internal_close(fd_t fd) {
- return internal_syscall(__NR_close, fd);
+ return internal_syscall(SYSCALL(close), fd);
}
uptr internal_open(const char *filename, int flags) {
- return internal_syscall(__NR_open, (uptr)filename, flags);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
- return internal_syscall(__NR_open, (uptr)filename, flags, mode);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+ mode);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
}
uptr OpenFile(const char *filename, bool write) {
@@ -107,17 +137,19 @@ uptr OpenFile(const char *filename, bool write) {
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
+ count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
+ count));
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -138,33 +170,43 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(stat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_stat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(lstat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_lstat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_fstat, fd, (uptr)buf);
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_fstat64, fd, &buf64);
+ int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
@@ -178,48 +220,80 @@ uptr internal_filesize(fd_t fd) {
}
uptr internal_dup2(int oldfd, int newfd) {
- return internal_syscall(__NR_dup2, oldfd, newfd);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+ return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
- return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
+ (uptr)path, (uptr)buf, bufsize);
+#else
+ return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
}
uptr internal_unlink(const char *path) {
- return internal_syscall(__NR_unlink, (uptr)path);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+ return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
}
uptr internal_sched_yield() {
- return internal_syscall(__NR_sched_yield);
+ return internal_syscall(SYSCALL(sched_yield));
}
void internal__exit(int exitcode) {
- internal_syscall(__NR_exit_group, exitcode);
+#if SANITIZER_FREEBSD
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
Die(); // Unreachable.
}
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
- return internal_syscall(__NR_execve, (uptr)filename, (uptr)argv, (uptr)envp);
+ return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+ (uptr)envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ struct stat st;
+ if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+ return false;
+#else
struct stat st;
if (internal_stat(filename, &st))
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
+#endif
}
uptr GetTid() {
- return internal_syscall(__NR_gettid);
+#if SANITIZER_FREEBSD
+ return (uptr)pthread_self();
+#else
+ return internal_syscall(SYSCALL(gettid));
+#endif
}
u64 NanoTime() {
+#if SANITIZER_FREEBSD
+ timeval tv;
+#else
kernel_timeval tv;
+#endif
internal_memset(&tv, 0, sizeof(tv));
- internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
+ internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
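
The wrappers above drop hard-coded __NR_* constants in favour of a SYSCALL(name) macro, so one body can resolve to __NR_<name> on Linux and to SYS_<name> on FreeBSD. A reduced sketch of that mapping, calling through libc's syscall(2) instead of the runtime's internal_syscall (the macro below approximates the idea; the runtime's exact definition lives in its syscall .inc files):

    // syscall_name_sketch.cc -- map a logical syscall name to a platform constant.
    #include <cstdio>
    #include <sys/syscall.h>
    #include <unistd.h>

    #if defined(__linux__)
    # define SYSCALL(name) __NR_##name   // Linux syscall numbers
    #else
    # define SYSCALL(name) SYS_##name    // FreeBSD (and other BSD) spelling
    #endif

    int main() {
      long pid = syscall(SYSCALL(getpid));  // libc syscall(2) as a stand-in
      printf("getpid via SYSCALL(getpid): %ld\n", pid);
      return 0;
    }
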
@@ -303,7 +377,18 @@ void ReExec() {
Die();
}
-void PrepareForSandboxing() {
+// Stub implementation of GetThreadStackAndTls for Go.
+#if SANITIZER_GO
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+}
+#endif // SANITIZER_GO
+
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
   // to read the file mappings from /proc/self/maps. Luckily, neither will the
   // process be able to load additional libraries, so it's fine to use the
@@ -313,216 +398,10 @@ void PrepareForSandboxing() {
#if !SANITIZER_GO
if (Symbolizer *sym = Symbolizer::GetOrNull())
sym->PrepareForSandboxing();
+ CovPrepareForSandboxing(args);
#endif
}
-// ----------------- sanitizer_procmaps.h
-// Linker initialized.
-ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
-StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
- &proc_self_maps_.mmaped_size, 1 << 26);
- if (cache_enabled) {
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
- }
- } else {
- CHECK_GT(proc_self_maps_.mmaped_size, 0);
- }
- Reset();
- // FIXME: in the future we may want to cache the mappings on demand only.
- if (cache_enabled)
- CacheMemoryMappings();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
- // Only unmap the buffer if it is different from the cached one. Otherwise
- // it will be unmapped when the cache is refreshed.
- if (proc_self_maps_.data != cached_proc_self_maps_.data) {
- UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
- }
-}
-
-void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_.data;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- SpinMutexLock l(&cache_lock_);
- // Don't invalidate the cache if the mappings are unavailable.
- ProcSelfMapsBuff old_proc_self_maps;
- old_proc_self_maps = cached_proc_self_maps_;
- cached_proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
- &cached_proc_self_maps_.mmaped_size, 1 << 26);
- if (cached_proc_self_maps_.mmaped_size == 0) {
- cached_proc_self_maps_ = old_proc_self_maps;
- } else {
- if (old_proc_self_maps.mmaped_size) {
- UnmapOrDie(old_proc_self_maps.data,
- old_proc_self_maps.mmaped_size);
- }
- }
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- SpinMutexLock l(&cache_lock_);
- if (cached_proc_self_maps_.data) {
- proc_self_maps_ = cached_proc_self_maps_;
- }
-}
-
-// Parse a hex value in str and update str.
-static uptr ParseHex(char **str) {
- uptr x = 0;
- char *s;
- for (s = *str; ; s++) {
- char c = *s;
- uptr v = 0;
- if (c >= '0' && c <= '9')
- v = c - '0';
- else if (c >= 'a' && c <= 'f')
- v = c - 'a' + 10;
- else if (c >= 'A' && c <= 'F')
- v = c - 'A' + 10;
- else
- break;
- x = x * 16 + v;
- }
- *str = s;
- return x;
-}
-
-static bool IsOneOf(char c, char c1, char c2) {
- return c == c1 || c == c2;
-}
-
-static bool IsDecimal(char c) {
- return c >= '0' && c <= '9';
-}
-
-static bool IsHex(char c) {
- return (c >= '0' && c <= '9')
- || (c >= 'a' && c <= 'f');
-}
-
-static uptr ReadHex(const char *p) {
- uptr v = 0;
- for (; IsHex(p[0]); p++) {
- if (p[0] >= '0' && p[0] <= '9')
- v = v * 16 + p[0] - '0';
- else
- v = v * 16 + p[0] - 'a' + 10;
- }
- return v;
-}
-
-static uptr ReadDecimal(const char *p) {
- uptr v = 0;
- for (; IsDecimal(p[0]); p++)
- v = v * 10 + p[0] - '0';
- return v;
-}
-
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- char *last = proc_self_maps_.data + proc_self_maps_.len;
- if (current_ >= last) return false;
- uptr dummy;
- if (!start) start = &dummy;
- if (!end) end = &dummy;
- if (!offset) offset = &dummy;
- char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
- if (next_line == 0)
- next_line = last;
- // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
- *start = ParseHex(&current_);
- CHECK_EQ(*current_++, '-');
- *end = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- uptr local_protection = 0;
- CHECK(IsOneOf(*current_, '-', 'r'));
- if (*current_++ == 'r')
- local_protection |= kProtectionRead;
- CHECK(IsOneOf(*current_, '-', 'w'));
- if (*current_++ == 'w')
- local_protection |= kProtectionWrite;
- CHECK(IsOneOf(*current_, '-', 'x'));
- if (*current_++ == 'x')
- local_protection |= kProtectionExecute;
- CHECK(IsOneOf(*current_, 's', 'p'));
- if (*current_++ == 's')
- local_protection |= kProtectionShared;
- if (protection) {
- *protection = local_protection;
- }
- CHECK_EQ(*current_++, ' ');
- *offset = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ':');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- while (IsDecimal(*current_))
- current_++;
- // Qemu may lack the trailing space.
- // http://code.google.com/p/address-sanitizer/issues/detail?id=160
- // CHECK_EQ(*current_++, ' ');
- // Skip spaces.
- while (current_ < next_line && *current_ == ' ')
- current_++;
- // Fill in the filename.
- uptr i = 0;
- while (current_ < next_line) {
- if (filename && i < filename_size - 1)
- filename[i++] = *current_;
- current_++;
- }
- if (filename && i < filename_size)
- filename[i] = 0;
- current_ = next_line + 1;
- return true;
-}
-
-// Gets the object name and the offset by walking MemoryMappingLayout.
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
-}
-
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
- char *smaps = 0;
- uptr smaps_cap = 0;
- uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
- &smaps, &smaps_cap, 64<<20);
- uptr start = 0;
- bool file = false;
- const char *pos = smaps;
- while (pos < smaps + smaps_len) {
- if (IsHex(pos[0])) {
- start = ReadHex(pos);
- for (; *pos != '/' && *pos > '\n'; pos++) {}
- file = *pos == '/';
- } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- for (; *pos < '0' || *pos > '9'; pos++) {}
- uptr rss = ReadDecimal(pos) * 1024;
- cb(start, rss, file, stats, stats_size);
- }
- while (*pos++ != '\n') {}
- }
- UnmapOrDie(smaps, smaps_cap);
-}
-
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
@@ -541,16 +420,26 @@ void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+#endif
+ }
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+ if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+#endif
+ }
}
void BlockingMutex::CheckLocked() {
@@ -563,71 +452,133 @@ void BlockingMutex::CheckLocked() {
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
+#if SANITIZER_X32
+ u64 d_ino;
+ u64 d_off;
+#else
unsigned long d_ino;
unsigned long d_off;
+#endif
unsigned short d_reclen;
char d_name[256];
};
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
- return internal_syscall(__NR_ptrace, request, pid, (uptr)addr, (uptr)data);
+ return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
}
uptr internal_waitpid(int pid, int *status, int options) {
- return internal_syscall(__NR_wait4, pid, (uptr)status, options,
+ return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
0 /* rusage */);
}
uptr internal_getpid() {
- return internal_syscall(__NR_getpid);
+ return internal_syscall(SYSCALL(getpid));
}
uptr internal_getppid() {
- return internal_syscall(__NR_getppid);
+ return internal_syscall(SYSCALL(getppid));
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
- return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+ return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
- return internal_syscall(__NR_lseek, fd, offset, whence);
+ return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
+#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
- return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
+ return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
+#endif
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
- return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
+ return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
- return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
- sizeof(__sanitizer_kernel_sigset_t));
+int internal_fork() {
+ return internal_syscall(SYSCALL(fork));
}
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset) {
- return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
- &oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
+#if SANITIZER_LINUX
+// Doesn't set sa_restorer; use with caution (see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+ __sanitizer_kernel_sigaction_t k_act, k_oldact;
+ internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ const __sanitizer_sigaction *u_act = (__sanitizer_sigaction *)act;
+ __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+ if (u_act) {
+ k_act.handler = u_act->handler;
+ k_act.sigaction = u_act->sigaction;
+ internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ k_act.sa_flags = u_act->sa_flags;
+ // FIXME: most often sa_restorer is unset, however the kernel requires it
+ // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+ // If sa_restorer passed to the kernel is NULL, the program may crash upon
+ // signal delivery or fail to unwind the stack in the signal handler.
+ // libc implementation of sigaction() passes its own restorer to
+ // rt_sigaction, so we need to do the same (we'll need to reimplement the
+ // restorers; for x86_64 the restorer address can be obtained from
+    // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact)).
+ k_act.sa_restorer = u_act->sa_restorer;
+ }
+
+ uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+ (uptr)(u_act ? &k_act : NULL),
+ (uptr)(u_oldact ? &k_oldact : NULL),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+ if ((result == 0) && u_oldact) {
+ u_oldact->handler = k_oldact.handler;
+ u_oldact->sigaction = k_oldact.sigaction;
+ internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ u_oldact->sa_flags = k_oldact.sa_flags;
+ u_oldact->sa_restorer = k_oldact.sa_restorer;
+ }
+ return result;
}
+#endif // SANITIZER_LINUX
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
+ (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
+ sizeof(__sanitizer_kernel_sigset_t));
+#endif
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
CHECK_LT(signum, sizeof(*set) * 8);
- const uptr idx = signum / (sizeof(set->sig[0]) * 8);
- const uptr bit = signum % (sizeof(set->sig[0]) * 8);
- set->sig[idx] &= ~(1 << bit);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ k_set->sig[idx] &= ~(1 << bit);
}
+#endif // SANITIZER_LINUX
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
@@ -698,7 +649,7 @@ bool ThreadLister::GetDirectoryEntries() {
}
uptr GetPageSize() {
-#if defined(__x86_64__) || defined(__i386__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@@ -753,8 +704,10 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
+#endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -788,7 +741,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && SANITIZER_LINUX
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -807,7 +760,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
- /* %rax = syscall(%rax = __NR_clone,
+ /* %rax = syscall(%rax = SYSCALL(clone),
* %rdi = flags,
* %rsi = child_stack,
* %rdx = parent_tidptr,
@@ -841,7 +794,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
/* Return to parent. */
"1:\n"
: "=a" (res)
- : "a"(__NR_clone), "i"(__NR_exit),
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
"S"(child_stack),
"D"(flags),
"d"(parent_tidptr),
@@ -850,7 +803,38 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "rsp", "memory", "r11", "rcx");
return res;
}
-#endif // defined(__x86_64__)
+#endif // defined(__x86_64__) && SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+// This thing is not, strictly speaking, async signal safe, but it does not seem
+// to cause any issues. Alternative is writing to log devices directly, but
+// their location and message format might change in the future, so we'd really
+// like to avoid that.
+void AndroidLogWrite(const char *buffer) {
+ char *copy = internal_strdup(buffer);
+ char *p = copy;
+ char *q;
+ // __android_log_write has an implicit message length limit.
+ // Print one line at a time.
+ do {
+ q = internal_strchr(p, '\n');
+ if (q) *q = '\0';
+ __android_log_write(ANDROID_LOG_INFO, NULL, p);
+ if (q) p = q + 1;
+ } while (q);
+ InternalFree(copy);
+}
+
+void GetExtraActivationFlags(char *buf, uptr size) {
+ CHECK(size > PROP_VALUE_MAX);
+ __system_property_get("asan.options", buf);
+}
+#endif
+
+bool IsDeadlySignal(int signum) {
+ return (signum == SIGSEGV) && common_flags()->handle_segv;
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
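
The BlockingMutex changes in this file keep one three-state protocol -- unlocked, locked, locked-with-possible-sleepers -- and only swap the sleeping primitive: futex on Linux, _umtx_op on FreeBSD. A compact Linux-only sketch of that protocol using C++11 atomics and the raw futex syscall (build with -std=c++11 -pthread; simplified, no error handling, and it assumes std::atomic<unsigned> shares its representation with a plain unsigned, which holds on Linux):

    // futex_mutex_sketch.cc -- three-state blocking mutex (Linux-only sketch).
    // States: 0 = unlocked, 1 = locked, 2 = locked with (possible) sleepers.
    #include <atomic>
    #include <cstdio>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <thread>
    #include <unistd.h>

    static std::atomic<unsigned> state{0};

    static long futex(unsigned *addr, int op, unsigned val) {
      return syscall(SYS_futex, addr, op, val, nullptr, nullptr, 0);
    }

    static void Lock() {
      unsigned *raw = reinterpret_cast<unsigned *>(&state);
      if (state.exchange(1, std::memory_order_acquire) == 0)
        return;  // fast path: it was unlocked
      // Slow path: advertise a sleeper, then wait until the owner wakes us.
      while (state.exchange(2, std::memory_order_acquire) != 0)
        futex(raw, FUTEX_WAIT, 2);
    }

    static void Unlock() {
      unsigned *raw = reinterpret_cast<unsigned *>(&state);
      if (state.exchange(0, std::memory_order_release) == 2)
        futex(raw, FUTEX_WAKE, 1);  // someone may be sleeping: wake one waiter
    }

    int main() {
      long counter = 0;
      auto work = [&] {
        for (int i = 0; i < 100000; i++) { Lock(); counter++; Unlock(); }
      };
      std::thread t1(work), t2(work);
      t1.join(); t2.join();
      printf("counter = %ld (expected 200000)\n", counter);
      return 0;
    }
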
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h
index 6422df142e7a..086834c3a2f9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.h
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.h
@@ -12,7 +12,7 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
@@ -27,20 +27,25 @@ struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact);
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset);
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+void internal_sigfillset(__sanitizer_sigset_t *set);
-#ifdef __x86_64__
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
+#endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@@ -64,8 +69,6 @@ class ThreadLister {
int bytes_read_;
};
-void AdjustStackSizeLinux(void *attr);
-
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
@@ -84,5 +87,5 @@ void CacheBinaryName();
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_LINUX_H
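
The header now reserves internal_sigaction_norestorer for stop-the-world and keeps real handlers -- such as the SEGV reporter controlled by handle_segv and use_sigaltstack -- on internal_sigaction and internal_sigaltstack. A plain-POSIX sketch of the end result those helpers support, a SIGSEGV handler delivered on an alternate stack so it still runs after a stack overflow (the stack size and the deliberate fault are illustrative only):

    // sigaltstack_sketch.cc -- SIGSEGV handler delivered on an alternate stack.
    #include <cstdio>
    #include <signal.h>
    #include <unistd.h>

    static char altstack[64 * 1024];  // fixed size; SIGSTKSZ would also do

    static void segv_handler(int sig) {
      (void)sig;
      // Only async-signal-safe calls belong here; write(2) is safe, printf is not.
      const char msg[] = "caught SIGSEGV on the alternate stack\n";
      ssize_t ignored = write(STDERR_FILENO, msg, sizeof(msg) - 1);
      (void)ignored;
      _exit(1);
    }

    int main() {
      // 1. Register an alternate stack for signal delivery.
      stack_t ss = {};
      ss.ss_sp = altstack;
      ss.ss_size = sizeof(altstack);
      if (sigaltstack(&ss, nullptr) != 0) { perror("sigaltstack"); return 1; }

      // 2. Install the handler with SA_ONSTACK so it runs on that stack.
      struct sigaction sa = {};
      sa.sa_handler = segv_handler;
      sa.sa_flags = SA_ONSTACK;
      sigemptyset(&sa.sa_mask);
      if (sigaction(SIGSEGV, &sa, nullptr) != 0) { perror("sigaction"); return 1; }

      // 3. Fault on purpose so the handler fires.
      volatile int *p = nullptr;
      *p = 42;
      return 0;
    }
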
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
index c9eb435be915..e754b26e693d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
@@ -23,27 +23,53 @@
#include <dlfcn.h>
#include <pthread.h>
-#include <sys/prctl.h>
+#include <signal.h>
#include <sys/resource.h>
+#if SANITIZER_FREEBSD
+#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
+#endif
#include <unwind.h>
+#if SANITIZER_FREEBSD
+#include <pthread_np.h>
+#define pthread_getattr_np pthread_attr_get_np
+#endif
+
+#if SANITIZER_LINUX
+#include <sys/prctl.h>
+#endif
+
#if !SANITIZER_ANDROID
#include <elf.h>
#include <link.h>
#include <unistd.h>
#endif
+namespace __sanitizer {
+
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
-SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
- return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE int
+real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
+} // extern "C"
+
+static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
+ if (real_pthread_attr_getstack)
+ return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
+ return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
}
-namespace __sanitizer {
+SANITIZER_WEAK_ATTRIBUTE int
+real_sigaction(int signum, const void *act, void *oldact);
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ if (real_sigaction)
+ return real_sigaction(signum, act, oldact);
+ return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact);
+}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
- static const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
@@ -77,10 +103,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
return;
}
pthread_attr_t attr;
+ pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
- __sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ my_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
@@ -88,8 +115,6 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)stackaddr;
}
-// Does not compile for Go because dlsym() requires -ldl
-#ifndef SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
@@ -98,9 +123,8 @@ bool SetEnv(const char *name, const char *value) {
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
- return setenv_f(name, value, 1) == 0;
+ return IndirectExternCall(setenv_f)(name, value, 1) == 0;
}
-#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
@@ -123,8 +147,52 @@ bool SanitizerGetThreadName(char *name, int max_len) {
#endif
}
-#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
+
+typedef struct {
+ uptr absolute_pc;
+ uptr stack_top;
+ uptr stack_size;
+} backtrace_frame_t;
+
+extern "C" {
+typedef void *(*acquire_my_map_info_list_func)();
+typedef void (*release_my_map_info_list_func)(void *map);
+typedef sptr (*unwind_backtrace_signal_arch_func)(
+ void *siginfo, void *sigcontext, void *map_info_list,
+ backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
+acquire_my_map_info_list_func acquire_my_map_info_list;
+release_my_map_info_list_func release_my_map_info_list;
+unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
+} // extern "C"
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder() {
+ void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
+ if (!p) {
+ VReport(1,
+ "Failed to open libcorkscrew.so. You may see broken stack traces "
+ "in SEGV reports.");
+ return;
+ }
+ acquire_my_map_info_list =
+ (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
+ release_my_map_info_list =
+ (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
+ unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
+ p, "unwind_backtrace_signal_arch");
+ if (!acquire_my_map_info_list || !release_my_map_info_list ||
+ !unwind_backtrace_signal_arch) {
+ VReport(1,
+ "Failed to find one of the required symbols in libcorkscrew.so. "
+ "You may see broken stack traces in SEGV reports.");
+ acquire_my_map_info_list = NULL;
+ unwind_backtrace_signal_arch = NULL;
+ release_my_map_info_list = NULL;
+ }
+}
+#endif
+
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
@@ -161,9 +229,8 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
size = 0;
- if (max_depth == 0)
- return;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg);
// We need to pop a few frames so that pc is on top.
@@ -175,9 +242,35 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
trace[0] = pc;
}
-#endif // !SANITIZER_GO
+void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ if (!unwind_backtrace_signal_arch) {
+ SlowUnwindStack(pc, max_depth);
+ return;
+ }
+
+ void *map = acquire_my_map_info_list();
+ CHECK(map);
+ InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
+ // siginfo argument appears to be unused.
+ sptr res = unwind_backtrace_signal_arch(/* siginfo */ NULL, context, map,
+ frames.data(),
+ /* ignore_depth */ 0, max_depth);
+ release_my_map_info_list(map);
+ if (res < 0) return;
+ CHECK_LE((uptr)res, kStackTraceMax);
+
+ size = 0;
+  // The +2 compensates for the libcorkscrew unwinder returning addresses of
+  // call instructions instead of raw return addresses.
+ for (sptr i = 0; i < res; ++i)
+ trace[size++] = frames[i].absolute_pc + 2;
+}
+#if !SANITIZER_FREEBSD
static uptr g_tls_size;
+#endif
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
@@ -186,7 +279,7 @@ static uptr g_tls_size;
#endif
void InitTlsSize() {
-#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@@ -196,16 +289,12 @@ void InitTlsSize() {
CHECK_NE(get_tls, 0);
size_t tls_size = 0;
size_t tls_align = 0;
- get_tls(&tls_size, &tls_align);
+ IndirectExternCall(get_tls)(&tls_size, &tls_align);
g_tls_size = tls_size;
-#endif
-}
-
-uptr GetTlsSize() {
- return g_tls_size;
+#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
}
-#if defined(__x86_64__) || defined(__i386__)
+#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
// sizeof(struct thread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@@ -221,7 +310,9 @@ uptr ThreadDescriptorSize() {
int minor = internal_simple_strtoll(buf + 8, &end, 10);
if (end != buf + 8 && (*end == '\0' || *end == '.')) {
/* sizeof(struct thread) values from various glibc versions. */
- if (minor <= 3)
+ if (SANITIZER_X32)
+ val = 1728; // Assume only one particular version for x32.
+ else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
val = FIRST_32_SECOND_64(1120, 1728);
@@ -253,27 +344,79 @@ uptr ThreadSelfOffset() {
uptr ThreadSelf() {
uptr descr_addr;
-#ifdef __i386__
+# if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#else
+# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#endif
+# else
+# error "unsupported CPU arch"
+# endif
return descr_addr;
}
-#endif // defined(__x86_64__) || defined(__i386__)
+#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
+
+#if SANITIZER_FREEBSD
+static void **ThreadSelfSegbase() {
+ void **segbase = 0;
+# if defined(__i386__)
+ // sysarch(I386_GET_GSBASE, segbase);
+ __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
+# elif defined(__x86_64__)
+ // sysarch(AMD64_GET_FSBASE, segbase);
+ __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
+# else
+# error "unsupported CPU arch for FreeBSD platform"
+# endif
+ return segbase;
+}
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
-#if defined(__x86_64__) || defined(__i386__)
- *tls_addr = ThreadSelf();
- *tls_size = GetTlsSize();
- *tls_addr -= *tls_size;
- *tls_addr += ThreadDescriptorSize();
+uptr ThreadSelf() {
+ return (uptr)ThreadSelfSegbase()[2];
+}
+#endif // SANITIZER_FREEBSD
+
+static void GetTls(uptr *addr, uptr *size) {
+#if SANITIZER_LINUX
+# if defined(__x86_64__) || defined(__i386__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+ *addr -= *size;
+ *addr += ThreadDescriptorSize();
+# else
+ *addr = 0;
+ *size = 0;
+# endif
+#elif SANITIZER_FREEBSD
+ void** segbase = ThreadSelfSegbase();
+ *addr = 0;
+ *size = 0;
+ if (segbase != 0) {
+ // tcbalign = 16
+ // tls_size = round(tls_static_space, tcbalign);
+ // dtv = segbase[1];
+ // dtv[2] = segbase - tls_static_space;
+ void **dtv = (void**) segbase[1];
+ *addr = (uptr) dtv[2];
+ *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
+ }
+#else
+# error "Unknown OS"
+#endif
+}
+
+uptr GetTlsSize() {
+#if SANITIZER_FREEBSD
+ uptr addr, size;
+ GetTls(&addr, &size);
+ return size;
#else
- *tls_addr = 0;
- *tls_size = 0;
+ return g_tls_size;
#endif
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
@@ -289,19 +432,13 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*tls_addr = *stk_addr + *stk_size;
}
}
-#else // SANITIZER_GO
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-#endif // SANITIZER_GO
}
-void AdjustStackSizeLinux(void *attr_) {
+void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
size_t stacksize = 0;
- __sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
@@ -309,10 +446,11 @@ void AdjustStackSizeLinux(void *attr_) {
const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) {
if (!stack_set) {
- if (common_flags()->verbosity && stacksize != 0)
- Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
- minstacksize);
- pthread_attr_setstacksize(attr, minstacksize);
+ if (stacksize != 0) {
+ VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
+ minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ }
} else {
Printf("Sanitizer: pre-allocated stack size is insufficient: "
"%zu < %zu\n", stacksize, minstacksize);
@@ -324,10 +462,13 @@ void AdjustStackSizeLinux(void *attr_) {
#if SANITIZER_ANDROID
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
- return 0;
+ MemoryMappingLayout memory_mapping(false);
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
}
#else // SANITIZER_ANDROID
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
+# endif
struct DlIteratePhdrData {
LoadedModule *modules;
@@ -378,6 +519,14 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
}
#endif // SANITIZER_ANDROID
+uptr indirect_call_wrapper;
+
+void SetIndirectCallWrapper(uptr wrapper) {
+ CHECK(!indirect_call_wrapper);
+ CHECK(wrapper);
+ indirect_call_wrapper = wrapper;
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
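
AdjustStackSize() (the renamed AdjustStackSizeLinux) grows a pthread attribute's stack to GetTlsSize() + 128 KiB unless the caller pinned an explicit stack region. A standalone sketch of that computation with the TLS size stubbed to an assumed constant (build with -pthread); it repeats the glibc quirk the original comments describe, where an unset stack address reads back as (0 - stacksize):

    // stacksize_sketch.cc -- grow a pthread attr's stack to fit TLS plus slack.
    #include <cstdint>
    #include <cstdio>
    #include <pthread.h>

    static size_t GetTlsSizeStub() { return 64 * 1024; }  // assumed TLS size

    static void AdjustStackSize(pthread_attr_t *attr) {
      void *stackaddr = nullptr;
      size_t stacksize = 0;
      pthread_attr_getstack(attr, &stackaddr, &stacksize);
      // glibc reports (0 - stacksize) as the address when only the size was set,
      // so "an explicit stack was pinned" needs both checks, as in the patch.
      uintptr_t addr = (uintptr_t)stackaddr;
      bool stack_set = (addr != 0) && (addr + stacksize != 0);
      size_t minstacksize = GetTlsSizeStub() + 128 * 1024;
      if (stacksize != 0 && stacksize < minstacksize && !stack_set) {
        printf("increasing stacksize %zu -> %zu\n", stacksize, minstacksize);
        pthread_attr_setstacksize(attr, minstacksize);
      }
    }

    static void *worker(void *) {
      printf("worker running\n");
      return nullptr;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setstacksize(&attr, 32 * 1024);  // deliberately small
      AdjustStackSize(&attr);
      pthread_t t;
      pthread_create(&t, &attr, worker, nullptr);
      pthread_join(t, nullptr);
      pthread_attr_destroy(&attr);
      return 0;
    }
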
diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h
index 9692e01b8e01..b72c548e3850 100644
--- a/libsanitizer/sanitizer_common/sanitizer_list.h
+++ b/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -24,6 +24,8 @@ namespace __sanitizer {
// non-zero-initialized objects before using.
template<class Item>
struct IntrusiveList {
+ friend class Iterator;
+
void clear() {
first_ = last_ = 0;
size_ = 0;
@@ -111,6 +113,21 @@ struct IntrusiveList {
}
}
+ class Iterator {
+ public:
+ explicit Iterator(IntrusiveList<Item> *list)
+ : list_(list), current_(list->first_) { }
+ Item *next() {
+ Item *ret = current_;
+ if (current_) current_ = current_->next;
+ return ret;
+ }
+ bool hasNext() const { return current_ != 0; }
+ private:
+ IntrusiveList<Item> *list_;
+ Item *current_;
+ };
+
// private, don't use directly.
uptr size_;
Item *first_;
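
The new Iterator gives IntrusiveList a hasNext()/next() walk, which PrintFlagDescriptions() uses. A cut-down sketch of an intrusive singly linked list with the same iterator shape (no size tracking or pop operations, unlike the real class):

    // intrusive_list_sketch.cc -- items carry their own `next' pointer; the list
    // itself never allocates.
    #include <cstdio>

    template <class Item>
    struct IntrusiveList {
      Item *first = nullptr;
      Item *last = nullptr;

      void push_back(Item *x) {
        x->next = nullptr;
        if (!first) { first = last = x; }
        else { last->next = x; last = x; }
      }

      class Iterator {
       public:
        explicit Iterator(IntrusiveList<Item> *list) : current_(list->first) {}
        Item *next() {                 // return the current item and advance
          Item *ret = current_;
          if (current_) current_ = current_->next;
          return ret;
        }
        bool hasNext() const { return current_ != nullptr; }
       private:
        Item *current_;
      };
    };

    struct FlagDescription {
      FlagDescription *next;           // the intrusive link lives in the item
      const char *name;
      const char *description;
    };

    int main() {
      static FlagDescription a = {nullptr, "verbosity", "Verbosity level."};
      static FlagDescription b = {nullptr, "help", "Print the flag descriptions."};
      IntrusiveList<FlagDescription> list;
      list.push_back(&a);
      list.push_back(&b);
      for (IntrusiveList<FlagDescription>::Iterator it(&list); it.hasNext();) {
        FlagDescription *d = it.next();
        printf("\t%s\n\t\t- %s\n", d->name, d->description);
      }
      return 0;
    }
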
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 288e31ca0787..6deba53d3e8a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -5,9 +5,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries and implements mac-specific functions from
-// sanitizer_libc.h.
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@@ -21,20 +20,22 @@
#include <stdio.h>
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_mac.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include <crt_externs.h> // for _NSGetEnviron
#include <fcntl.h>
-#include <mach-o/dyld.h>
-#include <mach-o/loader.h>
#include <pthread.h>
#include <sched.h>
+#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
+#include <sys/sysctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
@@ -118,6 +119,16 @@ uptr internal_getpid() {
return getpid();
}
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ return sigaction(signum,
+ (struct sigaction *)act, (struct sigaction *)oldact);
+}
+
+int internal_fork() {
+ // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
+ return fork();
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@@ -136,6 +147,20 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK(stack_top);
CHECK(stack_bottom);
uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ // pthread_get_stacksize_np() returns an incorrect stack size for the main
+ // thread on Mavericks. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=261
+ if ((GetMacosVersion() == MACOS_VERSION_MAVERICKS) && at_initialization &&
+ stacksize == (1 << 19)) {
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+ // Most often rl.rlim_cur will be the desired 8M.
+ if (rl.rlim_cur < kMaxThreadStackSize) {
+ stacksize = rl.rlim_cur;
+ } else {
+ stacksize = kMaxThreadStackSize;
+ }
+ }
void *stackaddr = pthread_get_stackaddr_np(pthread_self());
*stack_top = (uptr)stackaddr;
*stack_bottom = *stack_top - stacksize;
@@ -169,7 +194,8 @@ void ReExec() {
UNIMPLEMENTED();
}
-void PrepareForSandboxing() {
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ (void)args;
// Nothing here for now.
}
@@ -177,148 +203,6 @@ uptr GetPageSize() {
return sysconf(_SC_PAGESIZE);
}
-// ----------------- sanitizer_procmaps.h
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- Reset();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
-}
-
-// More information about Mach-O headers can be found in mach-o/loader.h
-// Each Mach-O image has a header (mach_header or mach_header_64) starting with
-// a magic number, and a list of linker load commands directly following the
-// header.
-// A load command is at least two 32-bit words: the command type and the
-// command size in bytes. We're interested only in segment load commands
-// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
-// into the task's address space.
-// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
-// segment_command_64 correspond to the memory address, memory size and the
-// file offset of the current memory segment.
-// Because these fields are taken from the images as is, one needs to add
-// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
-
-void MemoryMappingLayout::Reset() {
- // Count down from the top.
- // TODO(glider): as per man 3 dyld, iterating over the headers with
- // _dyld_image_count is thread-unsafe. We need to register callbacks for
- // adding and removing images which will invalidate the MemoryMappingLayout
- // state.
- current_image_ = _dyld_image_count();
- current_load_cmd_count_ = -1;
- current_load_cmd_addr_ = 0;
- current_magic_ = 0;
- current_filetype_ = 0;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- // No-op on Mac for now.
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- // No-op on Mac for now.
-}
-
-// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
-// Google Perftools, http://code.google.com/p/google-perftools.
-
-// NextSegmentLoad scans the current image for the next segment load command
-// and returns the start and end addresses and file offset of the corresponding
-// segment.
-// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- if (protection)
- UNIMPLEMENTED();
- const char* lc = current_load_cmd_addr_;
- current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
- if (((const load_command *)lc)->cmd == kLCSegment) {
- const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
- const SegmentCommand* sc = (const SegmentCommand *)lc;
- if (start) *start = sc->vmaddr + dlloff;
- if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (offset) {
- if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
- *offset = sc->vmaddr;
- } else {
- *offset = sc->fileoff;
- }
- }
- if (filename) {
- internal_strncpy(filename, _dyld_get_image_name(current_image_),
- filename_size);
- }
- return true;
- }
- return false;
-}
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- for (; current_image_ >= 0; current_image_--) {
- const mach_header* hdr = _dyld_get_image_header(current_image_);
- if (!hdr) continue;
- if (current_load_cmd_count_ < 0) {
- // Set up for this image;
- current_load_cmd_count_ = hdr->ncmds;
- current_magic_ = hdr->magic;
- current_filetype_ = hdr->filetype;
- switch (current_magic_) {
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
- break;
- }
-#endif
- case MH_MAGIC: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
- break;
- }
- default: {
- continue;
- }
- }
- }
-
- for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
- switch (current_magic_) {
- // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
-#endif
- case MH_MAGIC: {
- if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
- }
- }
- // If we get here, no more load_cmd's in this image talk about
- // segments. Go on to the next image.
- }
- return false;
-}
-
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
-}
-
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
@@ -377,32 +261,49 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
MemoryMappingLayout memory_mapping(false);
- memory_mapping.Reset();
- uptr cur_beg, cur_end, cur_offset;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0;
- n_modules < max_modules &&
- memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
- module_name.data(), module_name.size(), 0);
- i++) {
- const char *cur_name = module_name.data();
- if (cur_name[0] == '\0')
- continue;
- if (filter && !filter(cur_name))
- continue;
- LoadedModule *cur_module = 0;
- if (n_modules > 0 &&
- 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
- cur_module = &modules[n_modules - 1];
- } else {
- void *mem = &modules[n_modules];
- cur_module = new(mem) LoadedModule(cur_name, cur_beg);
- n_modules++;
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+}
+
+bool IsDeadlySignal(int signum) {
+ return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+}
+
+MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
+
+MacosVersion GetMacosVersionInternal() {
+ int mib[2] = { CTL_KERN, KERN_OSRELEASE };
+ char version[100];
+ uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
+ for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
+ // Get the version length.
+ CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
+ CHECK_LT(len, maxlen);
+ CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
+ switch (version[0]) {
+ case '9': return MACOS_VERSION_LEOPARD;
+ case '1': {
+ switch (version[1]) {
+ case '0': return MACOS_VERSION_SNOW_LEOPARD;
+ case '1': return MACOS_VERSION_LION;
+ case '2': return MACOS_VERSION_MOUNTAIN_LION;
+ case '3': return MACOS_VERSION_MAVERICKS;
+ default: return MACOS_VERSION_UNKNOWN;
+ }
}
- cur_module->addAddressRange(cur_beg, cur_end);
+ default: return MACOS_VERSION_UNKNOWN;
+ }
+}
+
+MacosVersion GetMacosVersion() {
+ atomic_uint32_t *cache =
+ reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
+ MacosVersion result =
+ static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
+ if (result == MACOS_VERSION_UNINITIALIZED) {
+ result = GetMacosVersionInternal();
+ atomic_store(cache, result, memory_order_release);
}
- return n_modules;
+ return result;
}
} // namespace __sanitizer
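
GetMacosVersion() above caches the result of a sysctl(KERN_OSRELEASE) query behind an acquire/release atomic, so the query runs only until the first successful store. A minimal standalone sketch of that caching shape with std::atomic; ComputeVersion stands in for the sysctl parse and is not the patch's code.

    #include <atomic>
    #include <cstdio>

    enum Version { kUninitialized = 0, kUnknown, kSomeRelease };

    static std::atomic<int> g_cached_version{kUninitialized};

    static Version ComputeVersion() {      // stand-in for the sysctl query + parse
      return kSomeRelease;
    }

    static Version GetVersion() {
      int v = g_cached_version.load(std::memory_order_acquire);
      if (v == kUninitialized) {
        v = ComputeVersion();              // racers all compute the same value
        g_cached_version.store(v, std::memory_order_release);
      }
      return static_cast<Version>(v);
    }

    int main() {
      std::printf("version enum = %d\n", static_cast<int>(GetVersion()));
    }
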
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
new file mode 100644
index 000000000000..63055297fef1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -0,0 +1,34 @@
+//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for OSX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_MAC_H
+#define SANITIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+namespace __sanitizer {
+
+enum MacosVersion {
+ MACOS_VERSION_UNINITIALIZED = 0,
+ MACOS_VERSION_UNKNOWN,
+ MACOS_VERSION_LEOPARD,
+ MACOS_VERSION_SNOW_LEOPARD,
+ MACOS_VERSION_LION,
+ MACOS_VERSION_MOUNTAIN_LION,
+ MACOS_VERSION_MAVERICKS
+};
+
+MacosVersion GetMacosVersion();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h
index d78f43edaaed..adc3add6008a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mutex.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -81,6 +81,88 @@ class BlockingMutex {
uptr owner_; // for debugging
};
+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+ RWMutex() {
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+ }
+
+ ~RWMutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ void Lock() {
+ u32 cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ DCHECK_NE(prev & kWriteLock, 0);
+ (void)prev;
+ }
+
+ void ReadLock() {
+ u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ ReadLockSlow();
+ }
+
+ void ReadUnlock() {
+ u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+ (void)prev;
+ }
+
+ void CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ private:
+ atomic_uint32_t state_;
+
+ enum {
+ kUnlocked = 0,
+ kWriteLock = 1,
+ kReadLock = 2
+ };
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 cmp = atomic_load(&state_, memory_order_relaxed);
+ if (cmp == kUnlocked &&
+ atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ }
+ }
+
+ void NOINLINE ReadLockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ }
+ }
+
+ RWMutex(const RWMutex&);
+ void operator = (const RWMutex&);
+};
+
template<typename MutexType>
class GenericScopedLock {
public:
@@ -121,6 +203,8 @@ class GenericScopedReadLock {
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
} // namespace __sanitizer
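
RWMutex above keeps the writer in bit 0 of a 32-bit state word and counts readers in units of 2; the new RWMutexLock/RWMutexReadLock typedefs give it RAII scoping. A simplified standalone re-creation of that state encoding with std::atomic (SpinRWMutex is an illustrative name, and the slow paths here just yield instead of the proc_yield/sched_yield backoff):

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>

    class SpinRWMutex {
     public:
      void Lock() {                        // writer: CAS from fully unlocked
        uint32_t cmp = kUnlocked;
        while (!state_.compare_exchange_weak(cmp, kWriteLock,
                                             std::memory_order_acquire)) {
          cmp = kUnlocked;
          std::this_thread::yield();
        }
      }
      void Unlock() { state_.fetch_sub(kWriteLock, std::memory_order_release); }

      void ReadLock() {                    // reader: add 2, then wait out a writer
        uint32_t prev = state_.fetch_add(kReadLock, std::memory_order_acquire);
        while (prev & kWriteLock) {
          std::this_thread::yield();
          prev = state_.load(std::memory_order_acquire);
        }
      }
      void ReadUnlock() { state_.fetch_sub(kReadLock, std::memory_order_release); }

     private:
      enum : uint32_t { kUnlocked = 0, kWriteLock = 1, kReadLock = 2 };
      std::atomic<uint32_t> state_{kUnlocked};
    };

    int main() {
      SpinRWMutex mu;
      int shared = 0;
      std::thread writer([&] { mu.Lock(); shared = 42; mu.Unlock(); });
      std::thread reader([&] { mu.ReadLock(); (void)shared; mu.ReadUnlock(); });
      writer.join();
      reader.join();
      assert(shared == 42);
    }
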
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 7693fe7ff17c..14594d5ce553 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -11,7 +11,8 @@
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported"
#endif
@@ -21,6 +22,12 @@
# define SANITIZER_LINUX 0
#endif
+#if defined(__FreeBSD__)
+# define SANITIZER_FREEBSD 1
+#else
+# define SANITIZER_FREEBSD 0
+#endif
+
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
@@ -46,6 +53,58 @@
# define SANITIZER_ANDROID 0
#endif
-#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC)
+#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
+// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
+// does not work well and we need to fall back to SizeClassAllocator32.
+// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
+// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
+#ifndef SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
+#endif
+
+// The range of addresses which can be returned by mmap.
+// FIXME: this value should be different on different platforms,
+// e.g. on AArch64 it is most likely (1ULL << 39). Larger values will still work
+// but will consume more memory for TwoLevelByteMap.
+#if defined(__aarch64__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 39)
+#else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// The AArch64 Linux port uses the canonical syscall set as mandated by
+// the upstream Linux community for all new ports. Other ports may still
+// use legacy syscalls.
+#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if defined(__aarch64__) && SANITIZER_LINUX
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+# else
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+# endif
+#endif
#endif // SANITIZER_PLATFORM_H
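
SANITIZER_WORDSIZE and FIRST_32_SECOND_64 above let a single expression pick a 32-bit or 64-bit constant, which is how the SANITIZER_MMAP_RANGE_SIZE default selects 2^32 vs 2^47. A standalone illustration with renamed macros (MY_WORDSIZE and MY_FIRST_32_SECOND_64 are placeholders, not the sanitizer's):

    #include <cstdint>
    #include <cstdio>

    #if defined(__LP64__) || defined(_WIN64)
    # define MY_WORDSIZE 64
    #else
    # define MY_WORDSIZE 32
    #endif

    #if MY_WORDSIZE == 64
    # define MY_FIRST_32_SECOND_64(a, b) (b)
    #else
    # define MY_FIRST_32_SECOND_64(a, b) (a)
    #endif

    int main() {
      // e.g. the default mmap range: 2^32 bytes on 32-bit, 2^47 on 64-bit targets.
      const uint64_t mmap_range = MY_FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47);
      std::printf("wordsize=%d mmap_range=0x%llx\n", MY_WORDSIZE,
                  (unsigned long long)mmap_range);
    }
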
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index f37d84b041d8..92bbc0051063 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -45,14 +45,16 @@
# define SI_IOS 0
#endif
-# define SANITIZER_INTERCEPT_STRCMP 1
-# define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRCMP 1
+#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MEMCHR 1
+#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX
-# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
@@ -65,109 +67,139 @@
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-
-# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
-
-# define SANITIZER_INTERCEPT_FREXP 1
-# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
-# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
-# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
-# define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
-# define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
-# define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
-# define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
-# define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WORDEXP SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
-# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STATFS64 \
- (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SHMCTL \
- (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
-# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
- SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
-# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
-# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-// FIXME: getline seems to be available on OSX 10.7
-# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
+
+#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
+
+#ifndef SANITIZER_INTERCEPT_PRINTF
+# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX
+#endif
-# define SANITIZER_INTERCEPT__EXIT SI_LINUX
+#define SANITIZER_INTERCEPT_FREXP 1
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SETPWENT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
+#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
+#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
+#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined (__x86_64)) // NOLINT
+#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
+#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
+#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STATFS64 \
+ (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SHMCTL \
+ (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
+#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
+#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
+#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+
+// FIXME: getline seems to be available on OSX 10.7
+#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT__EXIT SI_LINUX
+
+#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
+#define SANITIZER_INTERCEPT_GETIFADDRS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT___BZERO SI_MAC
+#define SANITIZER_INTERCEPT_FTIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
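
The SANITIZER_INTERCEPT_* gates above only evaluate to 0 or 1 per platform; the shared interceptor sources (not shown in this excerpt) check them so that each interceptor body compiles away where unsupported. A hypothetical sketch of that gating pattern; MY_SI_NOT_WINDOWS, MY_INTERCEPT_FOPEN and my_fopen_wrapper are made up and this is not the sanitizer's interception machinery:

    #include <cstdio>

    #define MY_SI_NOT_WINDOWS 1            // stand-in for SI_NOT_WINDOWS
    #define MY_INTERCEPT_FOPEN MY_SI_NOT_WINDOWS

    #if MY_INTERCEPT_FOPEN
    static std::FILE *my_fopen_wrapper(const char *path, const char *mode) {
      std::printf("intercepted fopen(%s)\n", path);   // pre-call hook
      return std::fopen(path, mode);                  // forward to the real call
    }
    #endif

    int main() {
    #if MY_INTERCEPT_FOPEN
      if (std::FILE *f = my_fopen_wrapper("/dev/null", "r")) std::fclose(f);
    #endif
      return 0;
    }
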
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
index bc37df0df498..76ee9001b7d1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -62,7 +62,7 @@ namespace __sanitizer {
unsigned struct_statfs64_sz = sizeof(struct statfs64);
} // namespace __sanitizer
-#if !defined(__powerpc64__) && !defined(__x86_64__)
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
index 196eb3b3c645..a93d38d8aace 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -12,10 +12,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
#include <arpa/inet.h>
#include <dirent.h>
@@ -31,10 +28,12 @@
#include <pwd.h>
#include <signal.h>
#include <stddef.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
+#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
@@ -42,12 +41,14 @@
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_ANDROID
+#include <sys/mount.h>
+#endif
+
#if SANITIZER_LINUX
+#include <malloc.h>
#include <mntent.h>
#include <netinet/ether.h>
-#include <utime.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
#include <sys/sysinfo.h>
#include <sys/vt.h>
#include <linux/cdrom.h>
@@ -62,18 +63,63 @@
#include <linux/posix_types.h>
#endif
+#if SANITIZER_FREEBSD
+# include <sys/mount.h>
+# include <sys/sockio.h>
+# include <sys/socket.h>
+# include <sys/filio.h>
+# include <sys/signal.h>
+# include <sys/timespec.h>
+# include <sys/timex.h>
+# include <sys/mqueue.h>
+# include <sys/msg.h>
+# include <sys/ipc.h>
+# include <sys/statvfs.h>
+# include <sys/soundcard.h>
+# include <sys/mtio.h>
+# include <sys/consio.h>
+# include <sys/kbio.h>
+# include <sys/link_elf.h>
+# include <netinet/ip_mroute.h>
+# include <netinet/in.h>
+# include <netinet/ip_compat.h>
+# include <net/ethernet.h>
+# include <net/ppp_defs.h>
+# include <glob.h>
+# include <term.h>
+
+#define _KERNEL // to declare 'shminfo' structure
+# include <sys/shm.h>
+#undef _KERNEL
+
+#undef INLINE // to avoid clashes with sanitizers' definitions
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_IOS
+#undef IOC_DIRMASK
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+# include <utime.h>
+# include <sys/ptrace.h>
+#endif
+
#if !SANITIZER_ANDROID
+#include <ifaddrs.h>
#include <sys/ucontext.h>
#include <wordexp.h>
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <glob.h>
+#include <obstack.h>
#include <mqueue.h>
#include <net/if_ppp.h>
#include <netax25/ax25.h>
#include <netipx/ipx.h>
#include <netrom/netrom.h>
+#include <rpc/xdr.h>
#include <scsi/scsi.h>
#include <sys/mtio.h>
#include <sys/kd.h>
@@ -92,7 +138,6 @@
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
-#include <sys/shm.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_ANDROID
@@ -112,16 +157,19 @@
#if SANITIZER_MAC
#include <net/ethernet.h>
#include <sys/filio.h>
-#include <sys/mount.h>
#include <sys/sockio.h>
#endif
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS
+#if !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS
+#endif // !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -134,6 +182,7 @@ namespace __sanitizer {
unsigned pid_t_sz = sizeof(pid_t);
unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
unsigned mbstate_t_sz = sizeof(mbstate_t);
unsigned sigset_t_sz = sizeof(sigset_t);
unsigned struct_timezone_sz = sizeof(struct timezone);
@@ -147,33 +196,40 @@ namespace __sanitizer {
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
- unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
- unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
- unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
- unsigned struct_ustat_sz = sizeof(struct ustat);
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_ustat_sz = sizeof(struct ustat);
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
- unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
uptr sig_ign = (uptr)SIG_IGN;
uptr sig_dfl = (uptr)SIG_DFL;
@@ -184,15 +240,17 @@ namespace __sanitizer {
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_shminfo_sz = sizeof(struct shminfo);
unsigned struct_shm_info_sz = sizeof(struct shm_info);
int shmctl_ipc_stat = (int)IPC_STAT;
int shmctl_ipc_info = (int)IPC_INFO;
int shmctl_shm_info = (int)SHM_INFO;
- int shmctl_shm_stat = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
#endif
+ int map_fixed = MAP_FIXED;
+
int af_inet = (int)AF_INET;
int af_inet6 = (int)AF_INET6;
@@ -205,13 +263,13 @@ namespace __sanitizer {
return 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64))
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#ifdef __x86_64
@@ -229,15 +287,21 @@ namespace __sanitizer {
int ptrace_setfpregs = PTRACE_SETFPREGS;
int ptrace_getfpxregs = PTRACE_GETFPXREGS;
int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
+ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
int ptrace_getsiginfo = PTRACE_GETSIGINFO;
int ptrace_setsiginfo = PTRACE_SETSIGINFO;
+#else
+ int ptrace_getsiginfo = -1;
+ int ptrace_setsiginfo = -1;
+#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO
#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
int ptrace_getregset = PTRACE_GETREGSET;
int ptrace_setregset = PTRACE_SETREGSET;
#else
int ptrace_getregset = -1;
int ptrace_setregset = -1;
-#endif
+#endif // PTRACE_GETREGSET/PTRACE_SETREGSET
#endif
unsigned path_max = PATH_MAX;
@@ -257,15 +321,6 @@ namespace __sanitizer {
unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);
unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);
unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);
-#if SOUND_VERSION >= 0x040000
- unsigned struct_copr_buffer_sz = 0;
- unsigned struct_copr_debug_buf_sz = 0;
- unsigned struct_copr_msg_sz = 0;
-#else
- unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
- unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
- unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
-#endif
unsigned struct_ff_effect_sz = sizeof(struct ff_effect);
unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);
unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);
@@ -279,23 +334,34 @@ namespace __sanitizer {
unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);
unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);
unsigned struct_input_id_sz = sizeof(struct input_id);
+ unsigned struct_mtpos_sz = sizeof(struct mtpos);
+ unsigned struct_termio_sz = sizeof(struct termio);
+ unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
+ unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
+ unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
unsigned struct_midi_info_sz = sizeof(struct midi_info);
unsigned struct_mtget_sz = sizeof(struct mtget);
unsigned struct_mtop_sz = sizeof(struct mtop);
- unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry);
unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
unsigned struct_synth_info_sz = sizeof(struct synth_info);
- unsigned struct_termio_sz = sizeof(struct termio);
- unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
- unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
- unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
@@ -310,7 +376,6 @@ namespace __sanitizer {
unsigned struct_kbsentry_sz = sizeof(struct kbsentry);
unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);
unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);
- unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_scc_modem_sz = sizeof(struct scc_modem);
unsigned struct_scc_stat_sz = sizeof(struct scc_stat);
unsigned struct_serial_multiport_struct_sz
@@ -319,7 +384,12 @@ namespace __sanitizer {
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
@@ -372,10 +442,11 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if ((SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID)
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
#endif
+
#if SANITIZER_LINUX
unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);
unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);
@@ -466,9 +537,7 @@ namespace __sanitizer {
unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;
unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;
unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;
- unsigned IOCTL_MTIOCGET = MTIOCGET;
unsigned IOCTL_MTIOCPOS = MTIOCPOS;
- unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;
unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
@@ -480,9 +549,7 @@ namespace __sanitizer {
unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
- unsigned IOCTL_SIOCADDRT = SIOCADDRT;
unsigned IOCTL_SIOCDARP = SIOCDARP;
- unsigned IOCTL_SIOCDELRT = SIOCDELRT;
unsigned IOCTL_SIOCDRARP = SIOCDRARP;
unsigned IOCTL_SIOCGARP = SIOCGARP;
unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP;
@@ -501,7 +568,7 @@ namespace __sanitizer {
unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;
unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;
unsigned IOCTL_SIOCSRARP = SIOCSRARP;
-#if SOUND_VERSION >= 0x040000
+# if SOUND_VERSION >= 0x040000
unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;
@@ -518,7 +585,7 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;
-#else
+# else // SOUND_VERSION
unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;
unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;
unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;
@@ -535,7 +602,41 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
-#endif
+#endif // SOUND_VERSION
+ unsigned IOCTL_TCFLSH = TCFLSH;
+ unsigned IOCTL_TCGETA = TCGETA;
+ unsigned IOCTL_TCGETS = TCGETS;
+ unsigned IOCTL_TCSBRK = TCSBRK;
+ unsigned IOCTL_TCSBRKP = TCSBRKP;
+ unsigned IOCTL_TCSETA = TCSETA;
+ unsigned IOCTL_TCSETAF = TCSETAF;
+ unsigned IOCTL_TCSETAW = TCSETAW;
+ unsigned IOCTL_TCSETS = TCSETS;
+ unsigned IOCTL_TCSETSF = TCSETSF;
+ unsigned IOCTL_TCSETSW = TCSETSW;
+ unsigned IOCTL_TCXONC = TCXONC;
+ unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
+ unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
+ unsigned IOCTL_TIOCINQ = TIOCINQ;
+ unsigned IOCTL_TIOCLINUX = TIOCLINUX;
+ unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
+ unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
+ unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
+ unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
+ unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
+ unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
+ unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
+ unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+ unsigned IOCTL_VT_RESIZE = VT_RESIZE;
+ unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
+ unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+ unsigned IOCTL_SIOCADDRT = SIOCADDRT;
+ unsigned IOCTL_SIOCDELRT = SIOCDELRT;
unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
@@ -620,40 +721,14 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
- unsigned IOCTL_TCFLSH = TCFLSH;
- unsigned IOCTL_TCGETA = TCGETA;
- unsigned IOCTL_TCGETS = TCGETS;
- unsigned IOCTL_TCSBRK = TCSBRK;
- unsigned IOCTL_TCSBRKP = TCSBRKP;
- unsigned IOCTL_TCSETA = TCSETA;
- unsigned IOCTL_TCSETAF = TCSETAF;
- unsigned IOCTL_TCSETAW = TCSETAW;
- unsigned IOCTL_TCSETS = TCSETS;
- unsigned IOCTL_TCSETSF = TCSETSF;
- unsigned IOCTL_TCSETSW = TCSETSW;
- unsigned IOCTL_TCXONC = TCXONC;
- unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
- unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
- unsigned IOCTL_TIOCINQ = TIOCINQ;
- unsigned IOCTL_TIOCLINUX = TIOCLINUX;
- unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
- unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
- unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
- unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
- unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
- unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
- unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
unsigned IOCTL_VT_GETMODE = VT_GETMODE;
- unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
unsigned IOCTL_VT_RELDISP = VT_RELDISP;
- unsigned IOCTL_VT_RESIZE = VT_RESIZE;
- unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
- unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
unsigned IOCTL_VT_SETMODE = VT_SETMODE;
unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
@@ -685,37 +760,25 @@ namespace __sanitizer {
unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
- unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;
unsigned IOCTL_KDADDIO = KDADDIO;
unsigned IOCTL_KDDELIO = KDDELIO;
- unsigned IOCTL_KDDISABIO = KDDISABIO;
- unsigned IOCTL_KDENABIO = KDENABIO;
unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;
- unsigned IOCTL_KDGETLED = KDGETLED;
- unsigned IOCTL_KDGETMODE = KDGETMODE;
unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;
unsigned IOCTL_KDGKBENT = KDGKBENT;
unsigned IOCTL_KDGKBLED = KDGKBLED;
unsigned IOCTL_KDGKBMETA = KDGKBMETA;
- unsigned IOCTL_KDGKBMODE = KDGKBMODE;
unsigned IOCTL_KDGKBSENT = KDGKBSENT;
- unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
unsigned IOCTL_KDMAPDISP = KDMAPDISP;
- unsigned IOCTL_KDMKTONE = KDMKTONE;
unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;
- unsigned IOCTL_KDSETLED = KDSETLED;
- unsigned IOCTL_KDSETMODE = KDSETMODE;
unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;
unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;
unsigned IOCTL_KDSKBENT = KDSKBENT;
unsigned IOCTL_KDSKBLED = KDSKBLED;
unsigned IOCTL_KDSKBMETA = KDSKBMETA;
- unsigned IOCTL_KDSKBMODE = KDSKBMODE;
unsigned IOCTL_KDSKBSENT = KDSKBSENT;
unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;
- unsigned IOCTL_KIOCSOUND = KIOCSOUND;
unsigned IOCTL_LPABORT = LPABORT;
unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;
unsigned IOCTL_LPCAREFUL = LPCAREFUL;
@@ -730,7 +793,6 @@ namespace __sanitizer {
unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;
unsigned IOCTL_PIO_CMAP = PIO_CMAP;
unsigned IOCTL_PIO_FONT = PIO_FONT;
- unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
@@ -752,20 +814,40 @@ namespace __sanitizer {
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
- unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
- unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+ unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+ const int errno_EINVAL = EINVAL;
// EOWNERDEAD is not present in some older platforms.
#if defined(EOWNERDEAD)
- extern const int errno_EOWNERDEAD = EOWNERDEAD;
+ const int errno_EOWNERDEAD = EOWNERDEAD;
#else
- extern const int errno_EOWNERDEAD = -1;
+ const int errno_EOWNERDEAD = -1;
#endif
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
@@ -774,6 +856,31 @@ COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
CHECK_TYPE_SIZE(pthread_key_t);
#if SANITIZER_LINUX
+// FIXME: We define these on Linux and Mac, but only check them on Linux.
+COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);
+COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);
+COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);
+COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);
+COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);
+COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);
+COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);
+COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);
+COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);
+COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);
+COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);
+COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);
+COMPILER_CHECK(IOC_NONE == _IOC_NONE);
+COMPILER_CHECK(IOC_WRITE == _IOC_WRITE);
+COMPILER_CHECK(IOC_READ == _IOC_READ);
+COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);
+COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);
+COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
+COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));
+COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));
+COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
// There are more undocumented fields in dl_phdr_info that we are not interested
// in.
COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
@@ -781,11 +888,9 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
-COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
@@ -837,6 +942,8 @@ COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
#if SANITIZER_MAC
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
+#elif SANITIZER_FREEBSD
+// There is no 'd_off' field on FreeBSD.
#else
CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
@@ -921,15 +1028,20 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
CHECK_TYPE_SIZE(ether_addr);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ipc_perm);
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+# else
CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+# endif
CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
-CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
@@ -944,4 +1056,108 @@ CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
CHECK_TYPE_SIZE(clock_t);
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+# else
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)NULL)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)NULL)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+# endif // SANITIZER_FREEBSD
+#else
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+#endif // SANITIZER_LINUX
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+#endif
+
+#if SANITIZER_LINUX
+COMPILER_CHECK(sizeof(__sanitizer_mallinfo) == sizeof(struct mallinfo));
+#endif
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+#if !SANITIZER_ANDROID
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+#endif
+
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(passwd, pw_change);
+CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
+CHECK_SIZE_AND_OFFSET(passwd, pw_class);
+#endif
+
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
+CHECK_SIZE_AND_OFFSET(FILE, _markers);
+CHECK_SIZE_AND_OFFSET(FILE, _chain);
+CHECK_SIZE_AND_OFFSET(FILE, _fileno);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
+CHECK_TYPE_SIZE(obstack);
+CHECK_SIZE_AND_OFFSET(obstack, chunk_size);
+CHECK_SIZE_AND_OFFSET(obstack, chunk);
+CHECK_SIZE_AND_OFFSET(obstack, object_base);
+CHECK_SIZE_AND_OFFSET(obstack, next_free);
+#endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index be6e6cf1c3f6..dece2d3cbdd2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -19,12 +19,10 @@
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if !SANITIZER_IOS
+#if !SANITIZER_FREEBSD && !SANITIZER_IOS
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
- extern unsigned struct_passwd_sz;
- extern unsigned struct_group_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
extern unsigned pthread_t_sz;
@@ -32,6 +30,7 @@ namespace __sanitizer {
extern unsigned pid_t_sz;
extern unsigned timeval_sz;
extern unsigned uid_t_sz;
+ extern unsigned gid_t_sz;
extern unsigned mbstate_t_sz;
extern unsigned struct_timezone_sz;
extern unsigned struct_tms_sz;
@@ -40,6 +39,7 @@ namespace __sanitizer {
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
+ extern unsigned struct_sockaddr_sz;
#if !SANITIZER_ANDROID
extern unsigned ucontext_t_sz;
@@ -48,23 +48,21 @@ namespace __sanitizer {
#if SANITIZER_LINUX
#if defined(__x86_64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
#elif defined(__arm__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__aarch64__)
+ const unsigned struct_kernel_stat_sz = 128;
+ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc__) && !defined(__powerpc64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc64__)
- const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#endif
@@ -74,23 +72,28 @@ namespace __sanitizer {
// More fields that vary with the kernel version.
};
- extern unsigned struct_utimbuf_sz;
+ extern unsigned struct_epoll_event_sz;
+ extern unsigned struct_sysinfo_sz;
+ extern unsigned __user_cap_header_struct_sz;
+ extern unsigned __user_cap_data_struct_sz;
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
- extern unsigned struct_msqid_ds_sz;
- extern unsigned struct_mq_attr_sz;
- extern unsigned struct_timex_sz;
- extern unsigned struct_ustat_sz;
+
+ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if defined(__powerpc64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+#else
+ const unsigned struct___old_kernel_stat_sz = 32;
+#endif
extern unsigned struct_rlimit_sz;
- extern unsigned struct_epoll_event_sz;
- extern unsigned struct_sysinfo_sz;
+ extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
- extern unsigned __user_cap_header_struct_sz;
- extern unsigned __user_cap_data_struct_sz;
- const unsigned old_sigset_t_sz = sizeof(unsigned long);
- const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
struct __sanitizer_iocb {
u64 aio_data;
@@ -127,11 +130,23 @@ namespace __sanitizer {
uptr newlen;
unsigned long ___unused[4];
};
-#endif // SANITIZER_LINUX
+
+ const unsigned old_sigset_t_sz = sizeof(unsigned long);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ uptr v[10];
+ };
+#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ int v[10];
+ };
+
+ extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
- extern unsigned struct_statvfs_sz;
extern unsigned struct_statvfs64_sz;
struct __sanitizer_ipc_perm {
@@ -200,19 +215,121 @@ namespace __sanitizer {
uptr __unused5;
#endif
};
- #endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_ipc_perm {
+ unsigned int cuid;
+ unsigned int cgid;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned short mode;
+ unsigned short seq;
+ long key;
+ };
+
+ struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ unsigned int shm_lpid;
+ unsigned int shm_cpid;
+ int shm_nattch;
+ unsigned long shm_atime;
+ unsigned long shm_dtime;
+ unsigned long shm_ctime;
+ };
+#endif
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_msqid_ds_sz;
+ extern unsigned struct_mq_attr_sz;
+ extern unsigned struct_timex_sz;
+ extern unsigned struct_statvfs_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
struct __sanitizer_iovec {
- void *iov_base;
+ void *iov_base;
uptr iov_len;
};
+#if !SANITIZER_ANDROID
+ struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ };
+#endif // !SANITIZER_ANDROID
+
#if SANITIZER_MAC
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+ };
+
+ const int __sanitizer_XDR_ENCODE = 0;
+ const int __sanitizer_XDR_DECODE = 1;
+ const int __sanitizer_XDR_FREE = 2;
+#endif
+
+ struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_change;
+ char *pw_class;
+#endif
+#if !SANITIZER_ANDROID
+ char *pw_gecos;
+#endif
+ char *pw_dir;
+ char *pw_shell;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_expire;
+#endif
+#if SANITIZER_FREEBSD
+ int pw_fields;
+#endif
+ };
+
+ struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+ };
+
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer_time_t;
+#else
+ typedef long __sanitizer_time_t;
+#endif
+
+ struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+ };
+
struct __sanitizer_ether_addr {
u8 octet[6];
};
@@ -242,7 +359,7 @@ namespace __sanitizer {
};
#endif
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -281,6 +398,12 @@ namespace __sanitizer {
unsigned short d_reclen;
// more fields that we don't care about
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_dirent {
+ unsigned int d_fileno;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
#elif SANITIZER_ANDROID || defined(__x86_64__)
struct __sanitizer_dirent {
unsigned long long d_ino;
@@ -306,13 +429,16 @@ namespace __sanitizer {
};
#endif
-#if defined(__x86_64__) && !defined(_LP64)
+// 'clock_t' is 32 bits wide on x64 FreeBSD
+#if SANITIZER_FREEBSD
+ typedef int __sanitizer_clock_t;
+#elif defined(__x86_64__) && !defined(_LP64)
typedef long long __sanitizer_clock_t;
#else
typedef long __sanitizer_clock_t;
#endif
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
@@ -326,7 +452,7 @@ namespace __sanitizer {
typedef long __sanitizer___kernel_off_t;
#endif
-#if defined(__powerpc__)
+#if defined(__powerpc__) || defined(__aarch64__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
@@ -357,28 +483,44 @@ namespace __sanitizer {
// The size is determined by looking at sizeof of real sigset_t on linux.
uptr val[128 / sizeof(uptr)];
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+ };
#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_sigaction {
union {
- void (*sa_handler)(int sig);
- void (*sa_sigaction)(int sig, void *siginfo, void *uctx);
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
__sanitizer_sigset_t sa_mask;
int sa_flags;
+#endif
#if SANITIZER_LINUX
void (*sa_restorer)();
#endif
};
+#if SANITIZER_FREEBSD
+ typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+#else
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
};
+#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_kernel_sigaction_t {
union {
- void (*sigaction)(int signo, void *info, void *ctx);
void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
};
unsigned long sa_flags;
void (*sa_restorer)(void);
@@ -397,7 +539,7 @@ namespace __sanitizer {
extern int af_inet6;
uptr __sanitizer_in_addr_sz(int af);
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
struct __sanitizer_dl_phdr_info {
uptr dlpi_addr;
const char *dlpi_name;
@@ -411,7 +553,7 @@ namespace __sanitizer {
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@@ -437,13 +579,14 @@ namespace __sanitizer {
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
struct __sanitizer_glob_t {
uptr gl_pathc;
char **gl_pathv;
@@ -456,10 +599,27 @@ namespace __sanitizer {
int (*gl_lstat)(const char *, void *);
int (*gl_stat)(const char *, void *);
};
+# elif SANITIZER_FREEBSD
+ struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char*, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char*);
+ int (*gl_lstat)(const char*, void* /* struct stat* */);
+ int (*gl_stat)(const char*, void* /* struct stat* */);
+ };
+# endif // SANITIZER_FREEBSD
+# if SANITIZER_LINUX || SANITIZER_FREEBSD
extern int glob_nomatch;
extern int glob_altdirfunc;
-#endif
+# endif
+#endif // !SANITIZER_ANDROID
extern unsigned path_max;
@@ -467,10 +627,38 @@ namespace __sanitizer {
uptr we_wordc;
char **we_wordv;
uptr we_offs;
+#if SANITIZER_FREEBSD
+ char *we_strings;
+ uptr we_nbytes;
+#endif
};
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_FILE {
+ int _flags;
+ char *_IO_read_ptr;
+ char *_IO_read_end;
+ char *_IO_read_base;
+ char *_IO_write_base;
+ char *_IO_write_ptr;
+ char *_IO_write_end;
+ char *_IO_buf_base;
+ char *_IO_buf_end;
+ char *_IO_save_base;
+ char *_IO_backup_base;
+ char *_IO_save_end;
+ void *_markers;
+ __sanitizer_FILE *_chain;
+ int _fileno;
+ };
+# define SANITIZER_HAS_STRUCT_FILE 1
+#else
+ typedef void __sanitizer_FILE;
+# define SANITIZER_HAS_STRUCT_FILE 0
+#endif
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -490,7 +678,7 @@ namespace __sanitizer {
extern int ptrace_setregset;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned struct_shminfo_sz;
extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
@@ -499,6 +687,8 @@ namespace __sanitizer {
extern int shmctl_shm_stat;
#endif
+ extern int map_fixed;
+
// ioctl arguments
struct __sanitizer_ifconf {
int ifc_len;
@@ -511,7 +701,54 @@ namespace __sanitizer {
};
#endif
-#define IOC_SIZE(nr) (((nr) >> 16) & 0x3fff)
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer__obstack_chunk {
+ char *limit;
+ struct __sanitizer__obstack_chunk *prev;
+};
+
+struct __sanitizer_obstack {
+ long chunk_size;
+ struct __sanitizer__obstack_chunk *chunk;
+ char *object_base;
+ char *next_free;
+ uptr more_fields[7];
+};
+#endif
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
extern unsigned struct_arpreq_sz;
extern unsigned struct_ifreq_sz;
@@ -527,9 +764,6 @@ namespace __sanitizer {
extern unsigned struct_cdrom_tocentry_sz;
extern unsigned struct_cdrom_tochdr_sz;
extern unsigned struct_cdrom_volctrl_sz;
- extern unsigned struct_copr_buffer_sz;
- extern unsigned struct_copr_debug_buf_sz;
- extern unsigned struct_copr_msg_sz;
extern unsigned struct_ff_effect_sz;
extern unsigned struct_floppy_drive_params_sz;
extern unsigned struct_floppy_drive_struct_sz;
@@ -543,23 +777,28 @@ namespace __sanitizer {
extern unsigned struct_hd_geometry_sz;
extern unsigned struct_input_absinfo_sz;
extern unsigned struct_input_id_sz;
+ extern unsigned struct_mtpos_sz;
+ extern unsigned struct_termio_sz;
+ extern unsigned struct_vt_consize_sz;
+ extern unsigned struct_vt_sizes_sz;
+ extern unsigned struct_vt_stat_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned struct_copr_buffer_sz;
+ extern unsigned struct_copr_debug_buf_sz;
+ extern unsigned struct_copr_msg_sz;
extern unsigned struct_midi_info_sz;
extern unsigned struct_mtget_sz;
extern unsigned struct_mtop_sz;
- extern unsigned struct_mtpos_sz;
extern unsigned struct_rtentry_sz;
extern unsigned struct_sbi_instrument_sz;
extern unsigned struct_seq_event_rec_sz;
extern unsigned struct_synth_info_sz;
- extern unsigned struct_termio_sz;
- extern unsigned struct_vt_consize_sz;
extern unsigned struct_vt_mode_sz;
- extern unsigned struct_vt_sizes_sz;
- extern unsigned struct_vt_stat_sz;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ax25_parms_struct_sz;
extern unsigned struct_cyclades_monitor_sz;
extern unsigned struct_input_keymap_entry_sz;
@@ -570,7 +809,6 @@ namespace __sanitizer {
extern unsigned struct_kbsentry_sz;
extern unsigned struct_mtconfiginfo_sz;
extern unsigned struct_nr_parms_struct_sz;
- extern unsigned struct_ppp_stats_sz;
extern unsigned struct_scc_modem_sz;
extern unsigned struct_scc_stat_sz;
extern unsigned struct_serial_multiport_struct_sz;
@@ -578,7 +816,12 @@ namespace __sanitizer {
extern unsigned struct_sockaddr_ax25_sz;
extern unsigned struct_unimapdesc_sz;
extern unsigned struct_unimapinit_sz;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_audio_buf_info_sz;
+ extern unsigned struct_ppp_stats_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
extern unsigned struct_sioc_sg_req_sz;
@@ -635,7 +878,7 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSPGRP;
extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT;
#endif
@@ -729,9 +972,7 @@ namespace __sanitizer {
extern unsigned IOCTL_HDIO_SET_MULTCOUNT;
extern unsigned IOCTL_HDIO_SET_NOWERR;
extern unsigned IOCTL_HDIO_SET_UNMASKINTR;
- extern unsigned IOCTL_MTIOCGET;
extern unsigned IOCTL_MTIOCPOS;
- extern unsigned IOCTL_MTIOCTOP;
extern unsigned IOCTL_PPPIOCGASYNCMAP;
extern unsigned IOCTL_PPPIOCGDEBUG;
extern unsigned IOCTL_PPPIOCGFLAGS;
@@ -743,9 +984,7 @@ namespace __sanitizer {
extern unsigned IOCTL_PPPIOCSMAXCID;
extern unsigned IOCTL_PPPIOCSMRU;
extern unsigned IOCTL_PPPIOCSXASYNCMAP;
- extern unsigned IOCTL_SIOCADDRT;
extern unsigned IOCTL_SIOCDARP;
- extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SIOCDRARP;
extern unsigned IOCTL_SIOCGARP;
extern unsigned IOCTL_SIOCGIFENCAP;
@@ -774,6 +1013,39 @@ namespace __sanitizer {
extern unsigned IOCTL_SNDCTL_COPR_SENDMSG;
extern unsigned IOCTL_SNDCTL_COPR_WCODE;
extern unsigned IOCTL_SNDCTL_COPR_WDATA;
+ extern unsigned IOCTL_TCFLSH;
+ extern unsigned IOCTL_TCGETA;
+ extern unsigned IOCTL_TCGETS;
+ extern unsigned IOCTL_TCSBRK;
+ extern unsigned IOCTL_TCSBRKP;
+ extern unsigned IOCTL_TCSETA;
+ extern unsigned IOCTL_TCSETAF;
+ extern unsigned IOCTL_TCSETAW;
+ extern unsigned IOCTL_TCSETS;
+ extern unsigned IOCTL_TCSETSF;
+ extern unsigned IOCTL_TCSETSW;
+ extern unsigned IOCTL_TCXONC;
+ extern unsigned IOCTL_TIOCGLCKTRMIOS;
+ extern unsigned IOCTL_TIOCGSOFTCAR;
+ extern unsigned IOCTL_TIOCINQ;
+ extern unsigned IOCTL_TIOCLINUX;
+ extern unsigned IOCTL_TIOCSERCONFIG;
+ extern unsigned IOCTL_TIOCSERGETLSR;
+ extern unsigned IOCTL_TIOCSERGWILD;
+ extern unsigned IOCTL_TIOCSERSWILD;
+ extern unsigned IOCTL_TIOCSLCKTRMIOS;
+ extern unsigned IOCTL_TIOCSSOFTCAR;
+ extern unsigned IOCTL_VT_DISALLOCATE;
+ extern unsigned IOCTL_VT_GETSTATE;
+ extern unsigned IOCTL_VT_RESIZE;
+ extern unsigned IOCTL_VT_RESIZEX;
+ extern unsigned IOCTL_VT_SENDSIG;
+#endif // SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned IOCTL_MTIOCGET;
+ extern unsigned IOCTL_MTIOCTOP;
+ extern unsigned IOCTL_SIOCADDRT;
+ extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
@@ -864,40 +1136,14 @@ namespace __sanitizer {
extern unsigned IOCTL_SOUND_PCM_READ_RATE;
extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
- extern unsigned IOCTL_TCFLSH;
- extern unsigned IOCTL_TCGETA;
- extern unsigned IOCTL_TCGETS;
- extern unsigned IOCTL_TCSBRK;
- extern unsigned IOCTL_TCSBRKP;
- extern unsigned IOCTL_TCSETA;
- extern unsigned IOCTL_TCSETAF;
- extern unsigned IOCTL_TCSETAW;
- extern unsigned IOCTL_TCSETS;
- extern unsigned IOCTL_TCSETSF;
- extern unsigned IOCTL_TCSETSW;
- extern unsigned IOCTL_TCXONC;
- extern unsigned IOCTL_TIOCGLCKTRMIOS;
- extern unsigned IOCTL_TIOCGSOFTCAR;
- extern unsigned IOCTL_TIOCINQ;
- extern unsigned IOCTL_TIOCLINUX;
- extern unsigned IOCTL_TIOCSERCONFIG;
- extern unsigned IOCTL_TIOCSERGETLSR;
- extern unsigned IOCTL_TIOCSERGWILD;
- extern unsigned IOCTL_TIOCSERSWILD;
- extern unsigned IOCTL_TIOCSLCKTRMIOS;
- extern unsigned IOCTL_TIOCSSOFTCAR;
extern unsigned IOCTL_VT_ACTIVATE;
- extern unsigned IOCTL_VT_DISALLOCATE;
extern unsigned IOCTL_VT_GETMODE;
- extern unsigned IOCTL_VT_GETSTATE;
extern unsigned IOCTL_VT_OPENQRY;
extern unsigned IOCTL_VT_RELDISP;
- extern unsigned IOCTL_VT_RESIZE;
- extern unsigned IOCTL_VT_RESIZEX;
- extern unsigned IOCTL_VT_SENDSIG;
extern unsigned IOCTL_VT_SETMODE;
extern unsigned IOCTL_VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned IOCTL_CYGETDEFTHRESH;
extern unsigned IOCTL_CYGETDEFTIMEOUT;
@@ -923,37 +1169,25 @@ namespace __sanitizer {
extern unsigned IOCTL_FS_IOC_SETVERSION;
extern unsigned IOCTL_GIO_CMAP;
extern unsigned IOCTL_GIO_FONT;
- extern unsigned IOCTL_GIO_SCRNMAP;
extern unsigned IOCTL_GIO_UNIMAP;
extern unsigned IOCTL_GIO_UNISCRNMAP;
extern unsigned IOCTL_KDADDIO;
extern unsigned IOCTL_KDDELIO;
- extern unsigned IOCTL_KDDISABIO;
- extern unsigned IOCTL_KDENABIO;
extern unsigned IOCTL_KDGETKEYCODE;
- extern unsigned IOCTL_KDGETLED;
- extern unsigned IOCTL_KDGETMODE;
extern unsigned IOCTL_KDGKBDIACR;
extern unsigned IOCTL_KDGKBENT;
extern unsigned IOCTL_KDGKBLED;
extern unsigned IOCTL_KDGKBMETA;
- extern unsigned IOCTL_KDGKBMODE;
extern unsigned IOCTL_KDGKBSENT;
- extern unsigned IOCTL_KDGKBTYPE;
extern unsigned IOCTL_KDMAPDISP;
- extern unsigned IOCTL_KDMKTONE;
extern unsigned IOCTL_KDSETKEYCODE;
- extern unsigned IOCTL_KDSETLED;
- extern unsigned IOCTL_KDSETMODE;
extern unsigned IOCTL_KDSIGACCEPT;
extern unsigned IOCTL_KDSKBDIACR;
extern unsigned IOCTL_KDSKBENT;
extern unsigned IOCTL_KDSKBLED;
extern unsigned IOCTL_KDSKBMETA;
- extern unsigned IOCTL_KDSKBMODE;
extern unsigned IOCTL_KDSKBSENT;
extern unsigned IOCTL_KDUNMAPDISP;
- extern unsigned IOCTL_KIOCSOUND;
extern unsigned IOCTL_LPABORT;
extern unsigned IOCTL_LPABORTOPEN;
extern unsigned IOCTL_LPCAREFUL;
@@ -968,7 +1202,6 @@ namespace __sanitizer {
extern unsigned IOCTL_MTIOCSETCONFIG;
extern unsigned IOCTL_PIO_CMAP;
extern unsigned IOCTL_PIO_FONT;
- extern unsigned IOCTL_PIO_SCRNMAP;
extern unsigned IOCTL_PIO_UNIMAP;
extern unsigned IOCTL_PIO_UNIMAPCLR;
extern unsigned IOCTL_PIO_UNISCRNMAP;
@@ -996,9 +1229,29 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSERGETMULTI;
extern unsigned IOCTL_TIOCSERSETMULTI;
extern unsigned IOCTL_TIOCSSERIAL;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned IOCTL_GIO_SCRNMAP;
+ extern unsigned IOCTL_KDDISABIO;
+ extern unsigned IOCTL_KDENABIO;
+ extern unsigned IOCTL_KDGETLED;
+ extern unsigned IOCTL_KDGETMODE;
+ extern unsigned IOCTL_KDGKBMODE;
+ extern unsigned IOCTL_KDGKBTYPE;
+ extern unsigned IOCTL_KDMKTONE;
+ extern unsigned IOCTL_KDSETLED;
+ extern unsigned IOCTL_KDSETMODE;
+ extern unsigned IOCTL_KDSKBMODE;
+ extern unsigned IOCTL_KIOCSOUND;
+ extern unsigned IOCTL_PIO_SCRNMAP;
#endif
+ extern const int errno_EINVAL;
extern const int errno_EOWNERDEAD;
+
+ extern const int si_SEGV_MAPERR;
+ extern const int si_SEGV_ACCERR;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
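
As a reference for the IOC_* macros introduced above, here is a minimal standalone sketch (an illustration, not part of the change itself) that decodes an ioctl request with the same bit layout, assuming the common non-PowerPC split of 8 NR bits, 8 type bits, 14 size bits and 2 direction bits; _IOR and struct winsize come from the host's <sys/ioctl.h>:

    // Mirror of IOC_DIR/IOC_TYPE/IOC_NR/IOC_SIZE for the non-PowerPC layout.
    #include <cstdio>
    #include <sys/ioctl.h>  // _IOR, struct winsize

    static unsigned Field(unsigned req, unsigned shift, unsigned mask) {
      return (req >> shift) & mask;
    }

    int main() {
      unsigned req = _IOR('T', 0x13, struct winsize);  // a sample read request
      std::printf("dir=%u type=0x%x nr=%u size=%u\n",
                  Field(req, 30, 0x3),     // IOC_DIR
                  Field(req, 8, 0xff),     // IOC_TYPE
                  Field(req, 0, 0xff),     // IOC_NR
                  Field(req, 16, 0x3fff)); // IOC_SIZE
      return 0;
    }
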
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index ef5cb0b03b8e..e24d5ed50316 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -20,6 +20,14 @@
#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
namespace __sanitizer {
// ------------- sanitizer_common.h
@@ -27,21 +35,65 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
+#if SANITIZER_WORDSIZE == 32
+// Take care of the unusable kernel area in the top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX
+ const uptr gbyte = 1UL << 30;
+
+  // First, check whether there are writable segments
+  // mapped to the top gigabyte (e.g. the stack).
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr end, prot;
+ while (proc_maps.Next(/*start*/0, &end,
+ /*offset*/0, /*filename*/0,
+ /*filename_size*/0, &prot)) {
+ if ((end >= 3 * gbyte)
+ && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
+ return 0;
+ }
+
+#if !SANITIZER_ANDROID
+  // Even if nothing is mapped, the top gigabyte may still be accessible
+  // if we are running on a 64-bit kernel.
+  // Uname may report misleading results if the personality type
+  // is modified (e.g. under schroot), so check this as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK)
+ && uname(&uname_info) == 0
+ && internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX
+}
+#endif // SANITIZER_WORDSIZE == 32
+
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
- // We somehow need to figure our which one we are using now and choose
+ // We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
return (1ULL << 44) - 1; // 0x00000fffffffffffUL
+# elif defined(__aarch64__)
+ return (1ULL << 39) - 1;
# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
#else // SANITIZER_WORDSIZE == 32
- // FIXME: We can probably lower this on Android?
- return (1ULL << 32) - 1; // 0xffffffff;
+ uptr res = (1ULL << 32) - 1; // 0xffffffff;
+ if (!common_flags()->full_address_space)
+ res -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&res), res);
+ return res;
#endif // SANITIZER_WORDSIZE
}
@@ -60,11 +112,13 @@ void *MmapOrDie(uptr size, const char *mem_type) {
Die();
}
recursion_count++;
- Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n",
SanitizerToolName, size, size, mem_type, reserrno);
DumpProcessMap();
CHECK("unable to mmap" && 0);
}
+ IncreaseTotalMmap(size);
return (void *)res;
}
@@ -76,6 +130,25 @@ void UnmapOrDie(void *addr, uptr size) {
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
+ DecreaseTotalMmap(size);
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ uptr PageSize = GetPageSizeCached();
+ uptr p = internal_mmap(0,
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ -1, 0);
+ int reserrno;
+ if (internal_iserror(p, &reserrno)) {
+ Report("ERROR: %s failed to "
+ "allocate noreserve 0x%zx (%zd) bytes for '%s' (errno: %d)\n",
+ SanitizerToolName, size, size, mem_type, reserrno);
+ CHECK("unable to mmap" && 0);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)p;
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
@@ -87,9 +160,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
-1, 0);
int reserrno;
if (internal_iserror(p, &reserrno))
- Report("ERROR: "
- "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
+ IncreaseTotalMmap(size);
return (void *)p;
}
@@ -102,11 +176,12 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
-1, 0);
int reserrno;
if (internal_iserror(p, &reserrno)) {
- Report("ERROR:"
- " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
CHECK("unable to mmap" && 0);
}
+ IncreaseTotalMmap(size);
return (void *)p;
}
@@ -157,7 +232,7 @@ void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
const sptr kBufSize = 4095;
- char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
+ char *filename = (char*)MmapOrDie(kBufSize, __func__);
Report("Process memory map follows:\n");
while (proc_maps.Next(&start, &end, /* file_offset */0,
filename, kBufSize, /* protection */0)) {
@@ -204,7 +279,7 @@ void MaybeOpenReportFile() {
if (report_fd_pid == pid) return;
InternalScopedBuffer<char> report_path_full(4096);
internal_snprintf(report_path_full.data(), report_path_full.size(),
- "%s.%d", report_path_prefix, pid);
+ "%s.%zu", report_path_prefix, pid);
uptr openrv = OpenFile(report_path_full.data(), true);
if (internal_iserror(openrv)) {
report_fd = kStderrFd;
@@ -248,4 +323,4 @@ bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
} // namespace __sanitizer
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
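
The GetKernelAreaSize() logic added above boils down to two ordinary calls; a standalone sketch of the 64-bit-kernel probe it performs (personality() is consulted first because, e.g. under schroot, a modified personality makes uname's machine string untrustworthy):

    #include <cstdio>
    #include <cstring>
    #include <sys/personality.h>
    #include <sys/utsname.h>

    int main() {
      struct utsname info;
      int pers = personality(0xffffffffUL);  // query without modifying
      bool kernel_is_64bit = !(pers & PER_MASK) && uname(&info) == 0 &&
                             std::strstr(info.machine, "64") != nullptr;
      std::printf("64-bit kernel: %s\n", kernel_is_64bit ? "yes" : "no");
      return 0;
    }
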
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
index ae782ac39cbc..8e3a96f01e4d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -12,12 +12,15 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_stacktrace.h"
#include <errno.h>
#include <pthread.h>
+#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
@@ -49,7 +52,7 @@ void DisableCoreDumper() {
bool StackSizeIsUnlimited() {
struct rlimit rlim;
CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
- return (rlim.rlim_cur == (uptr)-1);
+ return ((uptr)rlim.rlim_cur == (uptr)-1);
}
void SetStackSizeLimitInBytes(uptr limit) {
@@ -87,6 +90,59 @@ int internal_isatty(fd_t fd) {
return isatty(fd);
}
+#ifndef SANITIZER_GO
+// TODO(glider): different tools may require different altstack size.
+static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+
+void SetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ CHECK_EQ(0, sigaltstack(0, &oldstack));
+ // If the alternate stack is already in place, do nothing.
+ // Android always sets an alternate stack, but it's too small for us.
+ if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
+ // TODO(glider): the mapped stack should have the MAP_STACK flag in the
+ // future. It is not required by man 2 sigaltstack now (they're using
+ // malloc()).
+ void* base = MmapOrDie(kAltStackSize, __func__);
+ altstack.ss_sp = (char*) base;
+ altstack.ss_flags = 0;
+ altstack.ss_size = kAltStackSize;
+ CHECK_EQ(0, sigaltstack(&altstack, 0));
+}
+
+void UnsetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ altstack.ss_sp = 0;
+ altstack.ss_flags = SS_DISABLE;
+ altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
+ UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
+}
+
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+static void MaybeInstallSigaction(int signum,
+ SignalHandlerType handler) {
+ if (!IsDeadlySignal(signum))
+ return;
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)handler;
+ sigact.sa_flags = SA_SIGINFO;
+ if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
+ CHECK_EQ(0, internal_sigaction(signum, &sigact, 0));
+ VReport(1, "Installed the sigaction for signal %d\n", signum);
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ // Set the alternate signal stack for the main thread.
+ // This will cause SetAlternateSignalStack to be called twice, but the stack
+  // will actually be set only once.
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+ MaybeInstallSigaction(SIGSEGV, handler);
+ MaybeInstallSigaction(SIGBUS, handler);
+}
+#endif // SANITIZER_GO
+
} // namespace __sanitizer
-#endif
+#endif // SANITIZER_POSIX
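
For context, the sigaltstack plus SA_ONSTACK wiring that SetAlternateSignalStack() and MaybeInstallSigaction() add can be reduced to plain libc calls; the handler below is a stand-in, and a fixed 64 KiB buffer is used since, as the code notes, SIGSTKSZ alone is not enough:

    #include <csignal>
    #include <cstdlib>

    static void OnDeadlySignal(int sig, siginfo_t *, void *) {
      // A real tool would print a report here; just exit for the sketch.
      _Exit(128 + sig);
    }

    int main() {
      static char stack_mem[64 * 1024];  // comfortably larger than SIGSTKSZ
      stack_t altstack = {};
      altstack.ss_sp = stack_mem;
      altstack.ss_size = sizeof(stack_mem);
      if (sigaltstack(&altstack, nullptr) != 0) return 1;

      struct sigaction sa = {};
      sa.sa_sigaction = OnDeadlySignal;
      sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
      return sigaction(SIGSEGV, &sa, nullptr) == 0 ? 0 : 1;
    }
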
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc
index 08951c7e2473..4fc26308ee48 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include <stdio.h>
@@ -92,11 +93,14 @@ static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
minimal_num_length, pad_with_zero, negative);
}
-static int AppendString(char **buff, const char *buff_end, const char *s) {
+static int AppendString(char **buff, const char *buff_end, int precision,
+ const char *s) {
if (s == 0)
s = "<null>";
int result = 0;
for (; *s; s++) {
+ if (precision >= 0 && result >= precision)
+ break;
result += AppendChar(buff, buff_end, *s);
}
return result;
@@ -104,7 +108,7 @@ static int AppendString(char **buff, const char *buff_end, const char *s) {
static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int result = 0;
- result += AppendString(buff, buff_end, "0x");
+ result += AppendString(buff, buff_end, -1, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16,
(SANITIZER_WORDSIZE == 64) ? 12 : 8, true);
return result;
@@ -113,7 +117,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %(\\.\\*)?s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -133,6 +137,12 @@ int VSNPrintf(char *buff, int buff_length,
width = width * 10 + *cur++ - '0';
}
}
+ bool have_precision = (cur[0] == '.' && cur[1] == '*');
+ int precision = -1;
+ if (have_precision) {
+ cur += 2;
+ precision = va_arg(args, int);
+ }
bool have_z = (*cur == 'z');
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
@@ -140,6 +150,8 @@ int VSNPrintf(char *buff, int buff_length,
s64 dval;
u64 uval;
bool have_flags = have_width | have_z | have_ll;
+ // Only %s supports precision for now
+ CHECK(!(precision >= 0 && *cur != 's'));
switch (*cur) {
case 'd': {
dval = have_ll ? va_arg(args, s64)
@@ -165,7 +177,7 @@ int VSNPrintf(char *buff, int buff_length,
}
case 's': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
- result += AppendString(&buff, buff_end, va_arg(args, char*));
+ result += AppendString(&buff, buff_end, precision, va_arg(args, char*));
break;
}
case 'c': {
@@ -258,6 +270,7 @@ static void SharedPrintfCode(bool append_pid, const char *format,
break;
}
RawWrite(buffer);
+ AndroidLogWrite(buffer);
CallPrintfAndReportCallback(buffer);
// If we had mapped any memory, clean up.
if (buffer != local_buffer)
@@ -265,6 +278,7 @@ static void SharedPrintfCode(bool append_pid, const char *format,
va_end(args2);
}
+FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -273,6 +287,7 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
+FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -284,6 +299,7 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
+FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@@ -292,6 +308,7 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
+FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size());
va_list args;
@@ -299,6 +316,7 @@ void InternalScopedString::append(const char *format, ...) {
VSNPrintf(data() + length_, size() - length_, format, args);
va_end(args);
length_ += internal_strlen(data() + length_);
+ CHECK_LT(length_, size());
}
} // namespace __sanitizer
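
A quick illustration of the "%.*s" precision form that VSNPrintf() now accepts: the extra int argument caps how many characters of the string are written, which matches what the standard snprintf does for the same specifier:

    #include <cstdio>

    int main() {
      char buf[64];
      const char *path = "/usr/lib/libfoo.so";  // any string will do
      std::snprintf(buf, sizeof(buf), "%.*s", 4, path);
      std::printf("%s\n", buf);  // prints "/usr"
      return 0;
    }
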
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
index 87887f6b74b1..d140c47fda9b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -12,49 +12,35 @@
#ifndef SANITIZER_PROCMAPS_H
#define SANITIZER_PROCMAPS_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
-#if SANITIZER_WINDOWS
-class MemoryMappingLayout {
- public:
- explicit MemoryMappingLayout(bool cache_enabled) {
- (void)cache_enabled;
- }
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- UNIMPLEMENTED();
- }
-};
-
-#else // SANITIZER_WINDOWS
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
uptr len;
};
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+ ~MemoryMappingLayout();
bool Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection);
void Reset();
- // Gets the object file name and the offset in that object for a given
- // address 'addr'. Returns true on success.
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection);
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
static void CacheMemoryMappings();
- ~MemoryMappingLayout();
+
+ // Stores the list of mapped objects into an array.
+ uptr DumpListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter);
// Memory protection masks.
static const uptr kProtectionRead = 1;
@@ -64,39 +50,10 @@ class MemoryMappingLayout {
private:
void LoadFromCache();
- // Default implementation of GetObjectNameAndOffset.
- // Quite slow, because it iterates through the whole process map for each
- // lookup.
- bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- Reset();
- uptr start, end, file_offset;
- for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size,
- protection);
- i++) {
- if (addr >= start && addr < end) {
- // Don't subtract 'start' for the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- *offset = (addr - (i ? start : 0)) + file_offset;
- return true;
- }
- }
- if (filename_size)
- filename[0] = '\0';
- return false;
- }
-# if SANITIZER_LINUX
+ // FIXME: Hide implementation details for different platforms in
+ // platform-specific files.
+# if SANITIZER_FREEBSD || SANITIZER_LINUX
ProcSelfMapsBuff proc_self_maps_;
char *current_;
@@ -127,8 +84,6 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// Returns code range for the specified module.
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
-#endif // SANITIZER_WINDOWS
-
} // namespace __sanitizer
#endif // SANITIZER_PROCMAPS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
new file mode 100644
index 000000000000..20a074a799f7
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -0,0 +1,302 @@
+//===-- sanitizer_procmaps_linux.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Linux-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#if SANITIZER_FREEBSD
+#include <unistd.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+#endif
+
+namespace __sanitizer {
+
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
+static void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+#if SANITIZER_FREEBSD
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+ size_t Size = 0;
+ int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+
+ proc_maps->data = (char*)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+#else
+ proc_maps->len = ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
+ &proc_maps->mmaped_size, 1 << 26);
+#endif
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ ReadProcMaps(&proc_self_maps_);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
+ }
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
+}
+
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ ReadProcMaps(&cached_proc_self_maps_);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
+}
+
+#if !SANITIZER_FREEBSD
+// Parse a hex value in str and update str.
+static uptr ParseHex(char **str) {
+ uptr x = 0;
+ char *s;
+ for (s = *str; ; s++) {
+ char c = *s;
+ uptr v = 0;
+ if (c >= '0' && c <= '9')
+ v = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ v = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ v = c - 'A' + 10;
+ else
+ break;
+ x = x * 16 + v;
+ }
+ *str = s;
+ return x;
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+ return c == c1 || c == c2;
+}
+#endif
+
+static bool IsDecimal(char c) {
+ return c >= '0' && c <= '9';
+}
+
+static bool IsHex(char c) {
+ return (c >= '0' && c <= '9')
+ || (c >= 'a' && c <= 'f');
+}
+
+static uptr ReadHex(const char *p) {
+ uptr v = 0;
+ for (; IsHex(p[0]); p++) {
+ if (p[0] >= '0' && p[0] <= '9')
+ v = v * 16 + p[0] - '0';
+ else
+ v = v * 16 + p[0] - 'a' + 10;
+ }
+ return v;
+}
+
+static uptr ReadDecimal(const char *p) {
+ uptr v = 0;
+ for (; IsDecimal(p[0]); p++)
+ v = v * 10 + p[0] - '0';
+ return v;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+#if SANITIZER_FREEBSD
+ struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
+
+ *start = (uptr)VmEntry->kve_start;
+ *end = (uptr)VmEntry->kve_end;
+ *offset = (uptr)VmEntry->kve_offset;
+
+ *protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ *protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ *protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ *protection |= kProtectionExecute;
+
+ if (filename != NULL && filename_size > 0) {
+ internal_snprintf(filename,
+ Min(filename_size, (uptr)PATH_MAX),
+ "%s", VmEntry->kve_path);
+ }
+
+ current_ += VmEntry->kve_structsize;
+#else // !SANITIZER_FREEBSD
+ char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ *start = ParseHex(&current_);
+ CHECK_EQ(*current_++, '-');
+ *end = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ CHECK(IsOneOf(*current_, '-', 'r'));
+ *protection = 0;
+ if (*current_++ == 'r')
+ *protection |= kProtectionRead;
+ CHECK(IsOneOf(*current_, '-', 'w'));
+ if (*current_++ == 'w')
+ *protection |= kProtectionWrite;
+ CHECK(IsOneOf(*current_, '-', 'x'));
+ if (*current_++ == 'x')
+ *protection |= kProtectionExecute;
+ CHECK(IsOneOf(*current_, 's', 'p'));
+ if (*current_++ == 's')
+ *protection |= kProtectionShared;
+ CHECK_EQ(*current_++, ' ');
+ *offset = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ':');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ while (IsDecimal(*current_))
+ current_++;
+ // Qemu may lack the trailing space.
+ // http://code.google.com/p/address-sanitizer/issues/detail?id=160
+ // CHECK_EQ(*current_++, ' ');
+ // Skip spaces.
+ while (current_ < next_line && *current_ == ' ')
+ current_++;
+ // Fill in the filename.
+ uptr i = 0;
+ while (current_ < next_line) {
+ if (filename && i < filename_size - 1)
+ filename[i++] = *current_;
+ current_++;
+ }
+ if (filename && i < filename_size)
+ filename[i] = 0;
+ current_ = next_line + 1;
+#endif // !SANITIZER_FREEBSD
+ return true;
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, cur_offset;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+ module_name.size(), 0);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ void *mem = &modules[n_modules];
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ uptr base_address = (i ? cur_beg : 0) - cur_offset;
+ LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
+ cur_module->addAddressRange(cur_beg, cur_end);
+ n_modules++;
+ }
+ return n_modules;
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = 0;
+ uptr smaps_cap = 0;
+ uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
+ &smaps, &smaps_cap, 64<<20);
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ReadHex(pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ for (; *pos < '0' || *pos > '9'; pos++) {}
+ uptr rss = ReadDecimal(pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
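
The Linux branch of MemoryMappingLayout::Next() above hand-parses lines of the form shown in its comment; the same record can be pulled apart with sscanf, as in this standalone sketch:

    #include <cstdio>

    int main() {
      const char *line = "08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar";
      unsigned long start, end, offset;
      char prot[5], path[256] = "";
      if (std::sscanf(line, "%lx-%lx %4s %lx %*x:%*x %*d %255s",
                      &start, &end, prot, &offset, path) >= 4)
        std::printf("start=%lx end=%lx prot=%s offset=%lx file=%s\n",
                    start, end, prot, offset, path);
      return 0;
    }
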
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
new file mode 100644
index 000000000000..c68530683467
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -0,0 +1,186 @@
+//===-- sanitizer_procmaps_mac.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Mac-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+
+namespace __sanitizer {
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which indicate that a part of the file is
+// mapped into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ current_image_ = _dyld_image_count();
+ current_load_cmd_count_ = -1;
+ current_load_cmd_addr_ = 0;
+ current_magic_ = 0;
+ current_filetype_ = 0;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, http://code.google.com/p/google-perftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template<u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(
+ uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size, uptr *protection) {
+ if (protection)
+ UNIMPLEMENTED();
+ const char* lc = current_load_cmd_addr_;
+ current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ if (start) *start = sc->vmaddr + dlloff;
+ if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
+ if (offset) {
+ if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
+ *offset = sc->vmaddr;
+ } else {
+ *offset = sc->fileoff;
+ }
+ }
+ if (filename) {
+ internal_strncpy(filename, _dyld_get_image_name(current_image_),
+ filename_size);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ for (; current_image_ >= 0; current_image_--) {
+ const mach_header* hdr = _dyld_get_image_header(current_image_);
+ if (!hdr) continue;
+ if (current_load_cmd_count_ < 0) {
+      // Set up for this image.
+ current_load_cmd_count_ = hdr->ncmds;
+ current_magic_ = hdr->magic;
+ current_filetype_ = hdr->filetype;
+ switch (current_magic_) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ }
+
+ for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
+ switch (current_magic_) {
+ // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, 0, module_name.data(),
+ module_name.size(), 0);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ LoadedModule *cur_module = 0;
+ if (n_modules > 0 &&
+ 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
+ cur_module = &modules[n_modules - 1];
+ } else {
+ void *mem = &modules[n_modules];
+ cur_module = new(mem) LoadedModule(cur_name, cur_beg);
+ n_modules++;
+ }
+ cur_module->addAddressRange(cur_beg, cur_end);
+ }
+ return n_modules;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
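
The Mac implementation above walks dyld's image list; a stripped-down sketch of that enumeration using the same <mach-o/dyld.h> entry points (macOS only, and subject to the thread-safety caveat in the TODO above):

    #include <cstdint>
    #include <cstdio>
    #include <mach-o/dyld.h>

    int main() {
      for (uint32_t i = 0; i < _dyld_image_count(); i++) {
        const mach_header *hdr = _dyld_get_image_header(i);
        if (!hdr) continue;
        std::printf("%-3u slide=%ld %s\n", i,
                    (long)_dyld_get_image_vmaddr_slide(i),
                    _dyld_get_image_name(i));
      }
      return 0;
    }
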
diff --git a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
index c6e79ad0c7c5..c85105851112 100644
--- a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
@@ -15,6 +15,8 @@
#ifndef SANITIZER_REPORT_DECORATOR_H
#define SANITIZER_REPORT_DECORATOR_H
+#include "sanitizer_common.h"
+
namespace __sanitizer {
class AnsiColorDecorator {
// FIXME: This is not portable. It assumes the special strings are printed to
@@ -34,6 +36,15 @@ class AnsiColorDecorator {
private:
bool ansi_;
};
+
+class SanitizerCommonDecorator: protected AnsiColorDecorator {
+ public:
+ SanitizerCommonDecorator()
+ : __sanitizer::AnsiColorDecorator(ColorizeReports()) { }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+};
+
} // namespace __sanitizer
#endif // SANITIZER_REPORT_DECORATOR_H
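
The new SanitizerCommonDecorator simply brackets warning text with the decorator's red and default sequences when reports are colorized; the standalone sketch below shows the effect with standard ANSI escapes (the exact strings AnsiColorDecorator emits are not reproduced here):

    #include <cstdio>
    #include <unistd.h>

    int main() {
      bool colorize = isatty(STDERR_FILENO) != 0;  // stand-in for ColorizeReports()
      const char *warn = colorize ? "\033[31m" : "";
      const char *end  = colorize ? "\033[0m" : "";
      std::fprintf(stderr, "%sWARNING%s: something looks off\n", warn, end);
      return 0;
    }
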
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 3a9e902537ac..3938f03a4d8f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -11,9 +11,7 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
-#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
namespace __sanitizer {
@@ -22,97 +20,13 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
// Cancel Thumb bit.
pc = pc & (~1);
#endif
-#if defined(__powerpc__) || defined(__powerpc64__)
- // PCs are always 4 byte aligned.
- return pc - 4;
-#elif defined(__sparc__)
+#if defined(__sparc__)
return pc - 8;
#else
return pc - 1;
#endif
}
-static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
- uptr pc) {
- buffer->append(" #%zu 0x%zx", frame_num, pc);
-}
-
-void StackTrace::PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback) {
- if (addr == 0 || size == 0) {
- Printf(" <empty stack>\n\n");
- return;
- }
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
- InternalScopedBuffer<AddressInfo> addr_frames(64);
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
- uptr frame_num = 0;
- for (uptr i = 0; i < size && addr[i]; i++) {
- // PCs in stack traces are actually the return addresses, that is,
- // addresses of the next instructions after the call.
- uptr pc = GetPreviousInstructionPc(addr[i]);
- uptr addr_frames_num = 0; // The number of stack frames for current
- // instruction address.
- if (symbolize_callback) {
- if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
- addr_frames_num = 1;
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- // We can't know anything about the string returned by external
- // symbolizer, but if it starts with filename, try to strip path prefix
- // from it.
- frame_desc.append(
- " %s",
- StripPathPrefix(buff.data(), common_flags()->strip_path_prefix));
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- if (common_flags()->symbolize && addr_frames_num == 0) {
- // Use our own (online) symbolizer, if necessary.
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- addr_frames_num =
- sym->SymbolizeCode(pc, addr_frames.data(), addr_frames.size());
- for (uptr j = 0; j < addr_frames_num; j++) {
- AddressInfo &info = addr_frames[j];
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- if (info.function) {
- frame_desc.append(" in %s", info.function);
- }
- if (info.file) {
- frame_desc.append(" ");
- PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
- } else if (info.module) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- info.Clear();
- }
- }
- if (addr_frames_num == 0) {
- // If online symbolization failed, try to output at least module and
- // offset for instruction.
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- uptr offset;
- if (proc_maps.GetObjectNameAndOffset(pc, &offset,
- buff.data(), buff.size(),
- /* protection */0)) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, buff.data(), offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- // Always print a trailing empty line after stack trace.
- Printf("\n");
-}
-
uptr StackTrace::GetCurrentPc() {
return GET_CALLER_PC();
}
@@ -120,10 +34,7 @@ uptr StackTrace::GetCurrentPc() {
void StackTrace::FastUnwindStack(uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom,
uptr max_depth) {
- if (max_depth == 0) {
- size = 0;
- return;
- }
+ CHECK_GE(max_depth, 2);
trace[0] = pc;
size = 1;
uhwptr *frame = (uhwptr *)bp;
@@ -144,22 +55,22 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp,
}
}
+static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
+ return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
+}
+
void StackTrace::PopStackFrames(uptr count) {
- CHECK(size >= count);
+ CHECK_LT(count, size);
size -= count;
- for (uptr i = 0; i < size; i++) {
+ for (uptr i = 0; i < size; ++i) {
trace[i] = trace[i + count];
}
}
-static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
- return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
-}
-
uptr StackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 192;
+ const int kPcThreshold = 288;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
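
The MatchPc()/LocatePcInTrace() pair above relies on an unsigned-wraparound trick, and the patch widens the matching threshold from 192 to 288. A small standalone sketch of that matching step (addresses are made up):

#include <cstdint>
#include <cstdio>

// With unsigned arithmetic, |a - b| <= t can be tested as
// (a - b <= t) || (b - a <= t): whichever difference "underflows" wraps to a
// huge value and fails its comparison.
static bool MatchPc(uintptr_t cur_pc, uintptr_t trace_pc, uintptr_t threshold) {
  return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
}

int main() {
  const uintptr_t trace[] = {0x400100, 0x400420, 0x4009f0};
  const uintptr_t pc = 0x400404;       // Slightly off the recorded return address.
  const uintptr_t kPcThreshold = 288;  // The constant the patch settles on.
  for (unsigned i = 0; i < 3; i++) {
    if (MatchPc(pc, trace[i], kPcThreshold)) {
      printf("pc 0x%lx matches frame #%u\n", (unsigned long)pc, i);  // Frame #1.
      break;
    }
  }
  return 0;
}
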
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index d06db5ffa7a9..f66857ed1fb7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -17,10 +17,9 @@ namespace __sanitizer {
static const uptr kStackTraceMax = 256;
-#if SANITIZER_LINUX && (defined(__arm__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__sparc__) || \
- defined(__mips__))
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__powerpc__) || \
+ defined(__powerpc64__) || defined(__sparc__) || \
+ defined(__mips__))
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
@@ -36,8 +35,10 @@ struct StackTrace {
uptr trace[kStackTraceMax];
// Prints a symbolized stacktrace, followed by an empty line.
- static void PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback = 0);
+ static void PrintStack(const uptr *addr, uptr size);
+ void Print() const {
+ PrintStack(trace, size);
+ }
void CopyFrom(const uptr *src, uptr src_size) {
top_frame_bp = 0;
@@ -56,7 +57,7 @@ struct StackTrace {
return request_fast_unwind;
}
- void Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
+ void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
static uptr GetCurrentPc();
@@ -66,6 +67,8 @@ struct StackTrace {
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
uptr max_depth);
void SlowUnwindStack(uptr pc, uptr max_depth);
+ void SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
};
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index ea2f9d07771d..c3c1045ee04a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -9,18 +9,80 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, bool request_fast_unwind) {
- if (!WillUseFastUnwind(request_fast_unwind))
- SlowUnwindStack(pc, max_depth);
- else
- FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
+ uptr pc) {
+ buffer->append(" #%zu 0x%zx", frame_num, pc);
+}
- top_frame_bp = size ? bp : 0;
+void StackTrace::PrintStack(const uptr *addr, uptr size) {
+ if (addr == 0 || size == 0) {
+ Printf(" <empty stack>\n\n");
+ return;
+ }
+ InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
+ InternalScopedBuffer<AddressInfo> addr_frames(64);
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && addr[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(addr[i]);
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
+ pc, addr_frames.data(), addr_frames.size());
+ for (uptr j = 0; j < addr_frames_num; j++) {
+ AddressInfo &info = addr_frames[j];
+ frame_desc.clear();
+ PrintStackFramePrefix(&frame_desc, frame_num, pc);
+ if (info.function) {
+ frame_desc.append(" in %s", info.function);
+ // Print offset in function if we don't know the source file.
+ if (!info.file && info.function_offset != AddressInfo::kUnknown)
+ frame_desc.append("+0x%zx", info.function_offset);
+ }
+ if (info.file) {
+ frame_desc.append(" ");
+ PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
+ } else if (info.module) {
+ frame_desc.append(" ");
+ PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
+ }
+ Printf("%s\n", frame_desc.data());
+ frame_num++;
+ info.Clear();
+ }
+ }
+ // Always print a trailing empty line after stack trace.
+ Printf("\n");
+}
+
+void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
+ uptr stack_top, uptr stack_bottom,
+ bool request_fast_unwind) {
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Avoid doing any work for small max_depth.
+ if (max_depth == 0) {
+ size = 0;
+ return;
+ }
+ if (max_depth == 1) {
+ size = 1;
+ trace[0] = pc;
+ return;
+ }
+ if (!WillUseFastUnwind(request_fast_unwind)) {
+ if (context)
+ SlowUnwindStackWithContext(pc, context, max_depth);
+ else
+ SlowUnwindStack(pc, max_depth);
+ } else {
+ FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+ }
}
} // namespace __sanitizer
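
The reworked Unwind() above special-cases depths 0 and 1 and then picks between the fast (frame-pointer) and slow (context-based) unwinders. A reduced sketch of that dispatch, with the actual walkers replaced by stubs (MiniTrace and its methods are invented stand-ins):

#include <cstdint>
#include <cstdio>

struct MiniTrace {
  static const unsigned kMax = 256;
  uintptr_t trace[kMax];
  unsigned size;

  MiniTrace() : size(0) {}

  void Unwind(unsigned max_depth, uintptr_t pc, bool can_fast, void *context) {
    if (max_depth == 0) { size = 0; return; }                 // No work at all.
    if (max_depth == 1) { size = 1; trace[0] = pc; return; }  // Trivial trace.
    if (!can_fast) {
      if (context)
        SlowUnwindWithContext(pc, context, max_depth);
      else
        SlowUnwind(pc, max_depth);
    } else {
      FastUnwind(pc, max_depth);
    }
  }

  // Stubs standing in for the real frame-pointer / unwinder-based walkers.
  void FastUnwind(uintptr_t pc, unsigned) { size = 1; trace[0] = pc; }
  void SlowUnwind(uintptr_t pc, unsigned) { size = 1; trace[0] = pc; }
  void SlowUnwindWithContext(uintptr_t pc, void *, unsigned) { size = 1; trace[0] = pc; }
};

int main() {
  MiniTrace t;
  t.Unwind(/*max_depth=*/16, /*pc=*/0x401000, /*can_fast=*/true, /*context=*/nullptr);
  printf("collected %u frame(s), top pc 0x%lx\n", t.size, (unsigned long)t.trace[0]);
  return 0;
}
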
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index 6ee63ec31686..58812023674e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -98,12 +98,11 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
- if (common_flags()->verbosity)
- Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
+ VReport(1, "Could not attach to thread %d (errno %d).\n", thread_id,
+ pterrno);
return false;
} else {
- if (common_flags()->verbosity)
- Report("Attached to thread %d.\n", thread_id);
+ VReport(1, "Attached to thread %d.\n", thread_id);
// The thread is not guaranteed to stop before ptrace returns, so we must
// wait on it.
uptr waitpid_status;
@@ -112,9 +111,8 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
if (internal_iserror(waitpid_status, &wperrno)) {
// Got a ECHILD error. I don't think this situation is possible, but it
// doesn't hurt to report it.
- if (common_flags()->verbosity)
- Report("Waiting on thread %d failed, detaching (errno %d).\n",
- thread_id, wperrno);
+ VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
+ thread_id, wperrno);
internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
return false;
}
@@ -129,14 +127,12 @@ void ThreadSuspender::ResumeAllThreads() {
int pterrno;
if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
&pterrno)) {
- if (common_flags()->verbosity)
- Report("Detached from thread %d.\n", tid);
+ VReport(1, "Detached from thread %d.\n", tid);
} else {
// Either the thread is dead, or we are already detached.
// The latter case is possible, for instance, if this function was called
// from a signal handler.
- if (common_flags()->verbosity)
- Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
+ VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
}
}
}
@@ -248,18 +244,18 @@ static int TracerThread(void* argument) {
// the mask we inherited from the caller thread.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
- __sanitizer_kernel_sigaction_t new_sigaction;
+ __sanitizer_sigaction new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.sigaction = TracerThreadSignalHandler;
new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &new_sigaction, NULL);
}
int exit_code = 0;
if (!thread_suspender.SuspendAllThreads()) {
- if (common_flags()->verbosity)
- Report("Failed suspending threads.\n");
+ VReport(1, "Failed suspending threads.\n");
exit_code = 3;
} else {
tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
@@ -299,9 +295,9 @@ class ScopedStackSpaceWithGuard {
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
-static __sanitizer_kernel_sigset_t blocked_sigset;
-static __sanitizer_kernel_sigset_t old_sigset;
-static __sanitizer_kernel_sigaction_t old_sigactions
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
+static __sanitizer_sigaction old_sigactions
[ARRAY_SIZE(kUnblockedSignals)];
class StopTheWorldScope {
@@ -318,12 +314,12 @@ class StopTheWorldScope {
// Remove the signal from the set of blocked signals.
internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
// Install the default handler.
- __sanitizer_kernel_sigaction_t new_sigaction;
+ __sanitizer_sigaction new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.handler = SIG_DFL;
internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction,
- &old_sigactions[signal_index]);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &new_sigaction, &old_sigactions[signal_index]);
}
int sigprocmask_status =
internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
@@ -344,8 +340,8 @@ class StopTheWorldScope {
// Restore the signal handlers.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
- internal_sigaction(kUnblockedSignals[signal_index],
- &old_sigactions[signal_index], NULL);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &old_sigactions[signal_index], NULL);
}
internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
@@ -387,8 +383,7 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
/* child_tidptr */);
int local_errno = 0;
if (internal_iserror(tracer_pid, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Failed spawning a tracer thread (errno %d).\n", local_errno);
+ VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
tracer_thread_argument.mutex.Unlock();
} else {
ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
@@ -404,11 +399,9 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
// At this point, any signal will either be blocked or kill us, so waitpid
// should never return (and set errno) while the tracer thread is alive.
uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
- if (internal_iserror(waitpid_status, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Waiting on the tracer thread failed (errno %d).\n",
- local_errno);
- }
+ if (internal_iserror(waitpid_status, &local_errno))
+ VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+ local_errno);
}
}
@@ -449,9 +442,8 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
int pterrno;
if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
&pterrno)) {
- if (common_flags()->verbosity)
- Report("Could not get registers from thread %d (errno %d).\n",
- tid, pterrno);
+ VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
+ pterrno);
return -1;
}
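
Throughout this file the repeated "if (common_flags()->verbosity) Report(...)" pattern is replaced by VReport(level, ...). A hedged stand-in showing the presumed gating behaviour (the macro name and flag variable below are invented for the illustration):

#include <cstdarg>
#include <cstdio>

static int verbosity = 1;  // Stand-in for the runtime's verbosity flag.

static void Report(const char *fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
}

// Invented name: only forwards to Report() when verbosity is high enough.
#define VREPORT(level, ...) \
  do { if (verbosity >= (level)) Report(__VA_ARGS__); } while (0)

int main() {
  VREPORT(1, "Attached to thread %d.\n", 1234);  // Printed at verbosity 1.
  VREPORT(2, "Very chatty detail.\n");           // Suppressed at verbosity 1.
  return 0;
}
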
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
index 14f13e620815..1766fb5a3190 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
@@ -18,8 +18,8 @@
namespace __sanitizer {
static const char *const kTypeStrings[SuppressionTypeCount] = {
- "none", "race", "mutex", "thread", "signal", "leak", "called_from_lib"
-};
+ "none", "race", "mutex", "thread",
+ "signal", "leak", "called_from_lib", "deadlock"};
bool TemplateMatch(char *templ, const char *str) {
if (str == 0 || str[0] == 0)
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.h b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
index b4c719cb1874..033ddc5f52ad 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.h
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
@@ -24,6 +24,7 @@ enum SuppressionType {
SuppressionSignal,
SuppressionLeak,
SuppressionLib,
+ SuppressionDeadlock,
SuppressionTypeCount
};
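
Adding SuppressionDeadlock means kTypeStrings in sanitizer_suppressions.cc has to grow in lockstep with this enum. One conventional guard for that kind of coupling (not present in the patch) is a static_assert on the array length:

enum SuppressionType {
  SuppressionNone,
  SuppressionRace,
  SuppressionMutex,
  SuppressionThread,
  SuppressionSignal,
  SuppressionLeak,
  SuppressionLib,
  SuppressionDeadlock,
  SuppressionTypeCount
};

static const char *const kTypeStrings[] = {
    "none",   "race", "mutex",           "thread",
    "signal", "leak", "called_from_lib", "deadlock"};

static_assert(sizeof(kTypeStrings) / sizeof(kTypeStrings[0]) ==
                  SuppressionTypeCount,
              "kTypeStrings must have one entry per SuppressionType");

int main() { return kTypeStrings[SuppressionDeadlock][0] == 'd' ? 0 : 1; }
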
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index af93de75081b..05fc6a7cbb92 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -25,23 +25,30 @@ namespace __sanitizer {
struct AddressInfo {
uptr address;
+
char *module;
uptr module_offset;
+
+ static const uptr kUnknown = ~(uptr)0;
char *function;
+ uptr function_offset;
+
char *file;
int line;
int column;
AddressInfo() {
internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
}
- // Deletes all strings and sets all fields to zero.
+ // Deletes all strings and resets all fields.
void Clear() {
InternalFree(module);
InternalFree(function);
InternalFree(file);
internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
}
void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
@@ -77,22 +84,20 @@ class Symbolizer {
/// reasons as this function will check $PATH for an external symbolizer. Not
/// thread safe.
static Symbolizer *Init(const char* path_to_external = 0);
- /// Initialize the symbolizer in a disabled state. Not thread safe.
- static Symbolizer *Disable();
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
- virtual uptr SymbolizeCode(uptr address, AddressInfo *frames,
- uptr max_frames) {
+ virtual uptr SymbolizePC(uptr address, AddressInfo *frames, uptr max_frames) {
return 0;
}
virtual bool SymbolizeData(uptr address, DataInfo *info) {
return false;
}
- virtual bool IsAvailable() {
+ virtual bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
return false;
}
- virtual bool IsExternalAvailable() {
+ virtual bool CanReturnFileLineInfo() {
return false;
}
// Release internal caches (if any).
@@ -119,6 +124,8 @@ class Symbolizer {
/// Create a symbolizer and store it to symbolizer_ without checking if one
/// already exists. Not thread safe.
static Symbolizer *CreateAndStore(const char *path_to_external);
+ /// Initialize the symbolizer in a disabled state. Not thread safe.
+ static Symbolizer *Disable();
static Symbolizer *symbolizer_;
static StaticSpinMutex init_mu_;
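
A small point about the AddressInfo changes at the top of this file: internal_memset() zeroes function_offset, and 0 is a legitimate offset, so the kUnknown sentinel has to be re-applied after every reset. A reduced illustration of that pattern (Frame is an invented stand-in, not the sanitizer struct):

#include <cstdint>
#include <cstdio>
#include <cstring>

struct Frame {
  static constexpr uintptr_t kUnknown = ~(uintptr_t)0;
  char *function;
  uintptr_t function_offset;

  Frame() { Reset(); }
  void Reset() {
    memset(this, 0, sizeof(*this));
    function_offset = kUnknown;  // 0 would look like a valid offset.
  }
};

int main() {
  Frame f;
  printf("offset known after reset? %s\n",
         f.function_offset == Frame::kUnknown ? "no" : "yes");
  return 0;
}
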
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
index 3023002af47a..86d32e529ec7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
@@ -35,7 +35,7 @@ namespace __sanitizer {
namespace {
-#if SANITIZER_CP_DEMANGLE
+# if SANITIZER_CP_DEMANGLE
struct CplusV3DemangleData {
char *buf;
uptr size, allocated;
@@ -62,13 +62,13 @@ static void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {
}
} // extern "C"
-char *CplusV3Demangle(const char *name, bool always_alloc) {
+char *CplusV3Demangle(const char *name) {
CplusV3DemangleData data;
data.buf = 0;
data.size = 0;
data.allocated = 0;
if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,
- CplusV3DemangleCallback, &data)) {
+ CplusV3DemangleCallback, &data)) {
if (data.size + 64 > data.allocated)
return data.buf;
char *buf = internal_strdup(data.buf);
@@ -77,17 +77,9 @@ char *CplusV3Demangle(const char *name, bool always_alloc) {
}
if (data.buf)
InternalFree(data.buf);
- if (always_alloc)
- return internal_strdup(name);
- return 0;
-}
-#else
-const char *CplusV3Demangle(const char *name, bool always_alloc) {
- if (always_alloc)
- return internal_strdup(name);
return 0;
}
-#endif
+# endif // SANITIZER_CP_DEMANGLE
struct SymbolizeCodeData {
AddressInfo *frames;
@@ -107,7 +99,7 @@ static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
info->Clear();
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
- info->function = CplusV3Demangle(function, true);
+ info->function = LibbacktraceSymbolizer::Demangle(function, true);
if (filename)
info->file = internal_strdup(filename);
info->line = lineno;
@@ -125,7 +117,7 @@ static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
info->Clear();
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
- info->function = CplusV3Demangle(symname, true);
+ info->function = LibbacktraceSymbolizer::Demangle(symname, true);
cdata->n_frames = 1;
}
}
@@ -134,7 +126,7 @@ static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
uintptr_t symval, uintptr_t symsize) {
DataInfo *info = (DataInfo *)vdata;
if (symname && symval) {
- info->name = CplusV3Demangle(symname, true);
+ info->name = LibbacktraceSymbolizer::Demangle(symname, true);
info->start = symval;
info->size = symsize;
}
@@ -179,17 +171,6 @@ bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
return true;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
-#if SANITIZER_CP_DEMANGLE
- const char *demangled = CplusV3Demangle(name, false);
- if (demangled)
- return demangled;
- return name;
-#else
- return 0;
-#endif
-}
-
#else // SANITIZER_LIBBACKTRACE
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
@@ -208,10 +189,16 @@ bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
return false;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
+#endif // SANITIZER_LIBBACKTRACE
+
+char *LibbacktraceSymbolizer::Demangle(const char *name, bool always_alloc) {
+#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE
+ if (char *demangled = CplusV3Demangle(name))
+ return demangled;
+#endif
+ if (always_alloc)
+ return internal_strdup(name);
return 0;
}
-#endif // SANITIZER_LIBBACKTRACE
-
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
index 05f0558c3d87..c7a83bfca921 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -1,4 +1,4 @@
-//===-- sanitizer_symbolizer_libbacktrace.h -------------------------------===//
+//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -9,6 +9,8 @@
// run-time libraries.
// Header for libbacktrace symbolizer.
//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
#include "sanitizer_platform.h"
#include "sanitizer_common.h"
@@ -18,6 +20,10 @@
# define SANITIZER_LIBBACKTRACE 0
#endif
+#ifndef SANITIZER_CP_DEMANGLE
+# define SANITIZER_CP_DEMANGLE 0
+#endif
+
namespace __sanitizer {
class LibbacktraceSymbolizer {
@@ -29,7 +35,8 @@ class LibbacktraceSymbolizer {
bool SymbolizeData(DataInfo *info);
- const char *Demangle(const char *name);
+ // May return NULL if demangling failed.
+ static char *Demangle(const char *name, bool always_alloc = false);
private:
explicit LibbacktraceSymbolizer(void *state) : state_(state) {}
@@ -38,3 +45,4 @@ class LibbacktraceSymbolizer {
};
} // namespace __sanitizer
+#endif // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index 7aead9703822..161c21b3aad7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -14,6 +14,7 @@
#if SANITIZER_POSIX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
@@ -52,107 +53,6 @@ static const char *DemangleCXXABI(const char *name) {
return name;
}
-#if defined(__x86_64__)
-static const char* const kSymbolizerArch = "--default-arch=x86_64";
-#elif defined(__i386__)
-static const char* const kSymbolizerArch = "--default-arch=i386";
-#elif defined(__powerpc64__)
-static const char* const kSymbolizerArch = "--default-arch=powerpc64";
-#else
-static const char* const kSymbolizerArch = "--default-arch=unknown";
-#endif
-
-static const int kSymbolizerStartupTimeMillis = 10;
-
-// Creates external symbolizer connected via pipe, user should write
-// to output_fd and read from input_fd.
-static bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
- int *input_fd, int *output_fd) {
- if (!FileExists(path_to_symbolizer)) {
- Report("WARNING: invalid path to external symbolizer!\n");
- return false;
- }
-
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
- }
- }
- CHECK(infd);
- CHECK(outfd);
-
- int pid = fork();
- if (pid == -1) {
- // Fork() failed.
- internal_close(infd[0]);
- internal_close(infd[1]);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- Report("WARNING: failed to fork external symbolizer "
- " (errno: %d)\n", errno);
- return false;
- } else if (pid == 0) {
- // Child subprocess.
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl(path_to_symbolizer, path_to_symbolizer, kSymbolizerArch, (char*)0);
- internal__exit(1);
- }
-
- // Continue execution in parent process.
- internal_close(outfd[0]);
- internal_close(infd[1]);
- *input_fd = infd[0];
- *output_fd = outfd[1];
-
- // Check that symbolizer subprocess started successfully.
- int pid_status;
- SleepForMillis(kSymbolizerStartupTimeMillis);
- int exited_pid = waitpid(pid, &pid_status, WNOHANG);
- if (exited_pid != 0) {
- // Either waitpid failed, or child has already exited.
- Report("WARNING: external symbolizer didn't start up correctly!\n");
- return false;
- }
-
- return true;
-}
-
// Extracts the prefix of "str" that consists of any characters not
// present in "delims" string, and copies this prefix to "result", allocating
// space for it.
@@ -192,29 +92,30 @@ static const char *ExtractUptr(const char *str, const char *delims,
return ret;
}
-// ExternalSymbolizer encapsulates communication between the tool and
-// external symbolizer program, running in a different subprocess,
-// For now we assume the following protocol:
-// For each request of the form
-// <module_name> <module_offset>
-// passed to STDIN, external symbolizer prints to STDOUT response:
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// ...
-// <empty line>
-// ExternalSymbolizer may not be used from two threads simultaneously.
-class ExternalSymbolizer {
+class ExternalSymbolizerInterface {
+ public:
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+ virtual char *SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) {
+ UNIMPLEMENTED();
+ }
+};
+
+// SymbolizerProcess encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess : public ExternalSymbolizerInterface {
public:
- explicit ExternalSymbolizer(const char *path)
+ explicit SymbolizerProcess(const char *path)
: path_(path),
input_fd_(kInvalidFd),
output_fd_(kInvalidFd),
times_restarted_(0),
- failed_to_start_(false) {
+ failed_to_start_(false),
+ reported_invalid_path_(false) {
CHECK(path_);
- CHECK_NE(path[0], '\0');
+ CHECK_NE(path_[0], '\0');
}
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
@@ -231,16 +132,13 @@ class ExternalSymbolizer {
return 0;
}
- void Flush() {
- }
-
private:
bool Restart() {
if (input_fd_ != kInvalidFd)
internal_close(input_fd_);
if (output_fd_ != kInvalidFd)
internal_close(output_fd_);
- return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
+ return StartSymbolizerSubprocess();
}
char *SendCommandImpl(bool is_data, const char *module_name,
@@ -248,8 +146,9 @@ class ExternalSymbolizer {
if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
return 0;
CHECK(module_name);
- internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
- is_data ? "DATA " : "", module_name, module_offset);
+ if (!RenderInputCommand(buffer_, kBufferSize, is_data, module_name,
+ module_offset))
+ return 0;
if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
if (!readFromSymbolizer(buffer_, kBufferSize))
@@ -263,7 +162,7 @@ class ExternalSymbolizer {
uptr read_len = 0;
while (true) {
uptr just_read = internal_read(input_fd_, buffer + read_len,
- max_length - read_len);
+ max_length - read_len - 1);
// We can't read 0 bytes, as we don't expect external symbolizer to close
// its stdout.
if (just_read == 0 || just_read == (uptr)-1) {
@@ -271,12 +170,10 @@ class ExternalSymbolizer {
return false;
}
read_len += just_read;
- // Empty line marks the end of symbolizer output.
- if (read_len >= 2 && buffer[read_len - 1] == '\n' &&
- buffer[read_len - 2] == '\n') {
+ if (ReachedEndOfOutput(buffer, read_len))
break;
- }
}
+ buffer[read_len] = '\0';
return true;
}
@@ -291,6 +188,110 @@ class ExternalSymbolizer {
return true;
}
+ bool StartSymbolizerSubprocess() {
+ if (!FileExists(path_)) {
+ if (!reported_invalid_path_) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ reported_invalid_path_ = true;
+ }
+ return false;
+ }
+
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+
+ // Real fork() may call user callbacks registered with pthread_atfork().
+ int pid = internal_fork();
+ if (pid == -1) {
+ // Fork() failed.
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = getdtablesize(); fd > 2; fd--)
+ internal_close(fd);
+ ExecuteWithDefaultArgs(path_);
+ internal__exit(1);
+ }
+
+ // Continue execution in parent process.
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ input_fd_ = infd[0];
+ output_fd_ = outfd[1];
+
+ // Check that symbolizer subprocess started successfully.
+ int pid_status;
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ int exited_pid = waitpid(pid, &pid_status, WNOHANG);
+ if (exited_pid != 0) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+ }
+
+ virtual bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name,
+ uptr module_offset) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+ UNIMPLEMENTED();
+ }
+
const char *path_;
int input_fd_;
int output_fd_;
@@ -299,8 +300,120 @@ class ExternalSymbolizer {
char buffer_[kBufferSize];
static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
uptr times_restarted_;
bool failed_to_start_;
+ bool reported_invalid_path_;
+};
+
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, the external symbolizer prints the response to STDOUT:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class LLVMSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {}
+
+ private:
+ bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name, uptr module_offset) const {
+ internal_snprintf(buffer, max_length, "%s\"%s\" 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ return true;
+ }
+
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ // Empty line marks the end of llvm-symbolizer output.
+ return length >= 2 && buffer[length - 1] == '\n' &&
+ buffer[length - 2] == '\n';
+ }
+
+ void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+#if defined(__x86_64__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64";
+#elif defined(__i386__)
+ const char* const kSymbolizerArch = "--default-arch=i386";
+#elif defined(__powerpc64__)
+ const char* const kSymbolizerArch = "--default-arch=powerpc64";
+#else
+ const char* const kSymbolizerArch = "--default-arch=unknown";
+#endif
+ execl(path_to_binary, path_to_binary, kSymbolizerArch, (char *)0);
+ }
+};
+
+class Addr2LineProcess : public SymbolizerProcess {
+ public:
+ Addr2LineProcess(const char *path, const char *module_name)
+ : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}
+
+ const char *module_name() const { return module_name_; }
+
+ private:
+ bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name, uptr module_offset) const {
+ if (is_data)
+ return false;
+ CHECK_EQ(0, internal_strcmp(module_name, module_name_));
+ internal_snprintf(buffer, max_length, "0x%zx\n", module_offset);
+ return true;
+ }
+
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ // Output should consist of two lines.
+ int num_lines = 0;
+ for (uptr i = 0; i < length; ++i) {
+ if (buffer[i] == '\n')
+ num_lines++;
+ if (num_lines >= 2)
+ return true;
+ }
+ return false;
+ }
+
+ void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+ execl(path_to_binary, path_to_binary, "-Cfe", module_name_, (char *)0);
+ }
+
+ const char *module_name_; // Owned, leaked.
+};
+
+class Addr2LinePool : public ExternalSymbolizerInterface {
+ public:
+ explicit Addr2LinePool(const char *addr2line_path,
+ LowLevelAllocator *allocator)
+ : addr2line_path_(addr2line_path), allocator_(allocator),
+ addr2line_pool_(16) {}
+
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ if (is_data)
+ return 0;
+ Addr2LineProcess *addr2line = 0;
+ for (uptr i = 0; i < addr2line_pool_.size(); ++i) {
+ if (0 ==
+ internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {
+ addr2line = addr2line_pool_[i];
+ break;
+ }
+ }
+ if (!addr2line) {
+ addr2line =
+ new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
+ addr2line_pool_.push_back(addr2line);
+ }
+ return addr2line->SendCommand(is_data, module_name, module_offset);
+ }
+
+ private:
+ const char *addr2line_path_;
+ LowLevelAllocator *allocator_;
+ InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
};
#if SANITIZER_SUPPORTS_WEAK_HOOKS
@@ -384,7 +497,7 @@ class InternalSymbolizer {
class POSIXSymbolizer : public Symbolizer {
public:
- POSIXSymbolizer(ExternalSymbolizer *external_symbolizer,
+ POSIXSymbolizer(ExternalSymbolizerInterface *external_symbolizer,
InternalSymbolizer *internal_symbolizer,
LibbacktraceSymbolizer *libbacktrace_symbolizer)
: Symbolizer(),
@@ -392,15 +505,14 @@ class POSIXSymbolizer : public Symbolizer {
internal_symbolizer_(internal_symbolizer),
libbacktrace_symbolizer_(libbacktrace_symbolizer) {}
- uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
+ uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
BlockingMutexLock l(&mu_);
if (max_frames == 0)
return 0;
- LoadedModule *module = FindModuleForAddress(addr);
- if (module == 0)
+ const char *module_name;
+ uptr module_offset;
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
return 0;
- const char *module_name = module->full_name();
- uptr module_offset = addr - module->base_address();
// First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
@@ -472,6 +584,7 @@ class POSIXSymbolizer : public Symbolizer {
info->address = addr;
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
+ // First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
if (libbacktrace_symbolizer_->SymbolizeData(info))
@@ -487,13 +600,15 @@ class POSIXSymbolizer : public Symbolizer {
return true;
}
- bool IsAvailable() {
- return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
- libbacktrace_symbolizer_ != 0;
+ bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ BlockingMutexLock l(&mu_);
+ return FindModuleNameAndOffsetForAddress(pc, module_name, module_address);
}
- bool IsExternalAvailable() {
- return external_symbolizer_ != 0;
+ bool CanReturnFileLineInfo() {
+ return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
+ libbacktrace_symbolizer_ != 0;
}
void Flush() {
@@ -502,8 +617,6 @@ class POSIXSymbolizer : public Symbolizer {
SymbolizerScope sym_scope(this);
internal_symbolizer_->Flush();
}
- if (external_symbolizer_ != 0)
- external_symbolizer_->Flush();
}
const char *Demangle(const char *name) {
@@ -511,13 +624,13 @@ class POSIXSymbolizer : public Symbolizer {
// Run hooks even if we don't use internal symbolizer, as cxxabi
// demangle may call system functions.
SymbolizerScope sym_scope(this);
- if (internal_symbolizer_ != 0)
- return internal_symbolizer_->Demangle(name);
+ // Try to use libbacktrace demangler (if available).
if (libbacktrace_symbolizer_ != 0) {
- const char *demangled = libbacktrace_symbolizer_->Demangle(name);
- if (demangled)
- return demangled;
+ if (const char *demangled = libbacktrace_symbolizer_->Demangle(name))
+ return demangled;
}
+ if (internal_symbolizer_ != 0)
+ return internal_symbolizer_->Demangle(name);
return DemangleCXXABI(name);
}
@@ -540,6 +653,7 @@ class POSIXSymbolizer : public Symbolizer {
}
// Otherwise, fall back to external symbolizer.
if (external_symbolizer_) {
+ SymbolizerScope sym_scope(this);
return external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
}
@@ -555,8 +669,7 @@ class POSIXSymbolizer : public Symbolizer {
CHECK(modules_);
n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts,
/* filter */ 0);
- // FIXME: Return this check when GetListOfModules is implemented on Mac.
- // CHECK_GT(n_modules_, 0);
+ CHECK_GT(n_modules_, 0);
CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
modules_fresh_ = true;
modules_were_reloaded = true;
@@ -577,6 +690,17 @@ class POSIXSymbolizer : public Symbolizer {
return 0;
}
+ bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
+ uptr *module_offset) {
+ mu_.CheckLocked();
+ LoadedModule *module = FindModuleForAddress(address);
+ if (module == 0)
+ return false;
+ *module_name = module->full_name();
+ *module_offset = address - module->base_address();
+ return true;
+ }
+
// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModuleContexts = 1 << 14;
LoadedModule *modules_; // Array of module descriptions is leaked.
@@ -585,27 +709,41 @@ class POSIXSymbolizer : public Symbolizer {
bool modules_fresh_;
BlockingMutex mu_;
- ExternalSymbolizer *external_symbolizer_; // Leaked.
- InternalSymbolizer *const internal_symbolizer_; // Leaked.
- LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
+ ExternalSymbolizerInterface *external_symbolizer_; // Leaked.
+ InternalSymbolizer *const internal_symbolizer_; // Leaked.
+ LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
};
Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
+ if (!common_flags()->symbolize) {
+ return new(symbolizer_allocator_) POSIXSymbolizer(0, 0, 0);
+ }
InternalSymbolizer* internal_symbolizer =
InternalSymbolizer::get(&symbolizer_allocator_);
- ExternalSymbolizer *external_symbolizer = 0;
+ ExternalSymbolizerInterface *external_symbolizer = 0;
LibbacktraceSymbolizer *libbacktrace_symbolizer = 0;
if (!internal_symbolizer) {
libbacktrace_symbolizer =
LibbacktraceSymbolizer::get(&symbolizer_allocator_);
if (!libbacktrace_symbolizer) {
- // Find path to llvm-symbolizer if it's not provided.
- if (!path_to_external)
- path_to_external = FindPathToBinary("llvm-symbolizer");
- if (path_to_external && path_to_external[0] != '\0')
- external_symbolizer = new(symbolizer_allocator_)
- ExternalSymbolizer(path_to_external);
+ if (path_to_external && path_to_external[0] == '\0') {
+ // External symbolizer is explicitly disabled. Do nothing.
+ } else {
+ // Find path to llvm-symbolizer if it's not provided.
+ if (!path_to_external)
+ path_to_external = FindPathToBinary("llvm-symbolizer");
+ if (path_to_external) {
+ external_symbolizer = new(symbolizer_allocator_)
+ LLVMSymbolizerProcess(path_to_external);
+ } else if (common_flags()->allow_addr2line) {
+ // If llvm-symbolizer is not found, try to use addr2line.
+ if (const char *addr2line_path = FindPathToBinary("addr2line")) {
+ external_symbolizer = new(symbolizer_allocator_)
+ Addr2LinePool(addr2line_path, &symbolizer_allocator_);
+ }
+ }
+ }
}
}
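
The SymbolizerProcess refactoring above moves the wire format into the RenderInputCommand()/ReachedEndOfOutput() hooks. A self-contained sketch of the llvm-symbolizer framing those hooks implement (command text only, no subprocess plumbing; the paths and reply are hypothetical):

#include <cstddef>
#include <cstdio>
#include <cstring>

// Render one request line, e.g. "DATA \"/lib/libc.so.6\" 0x1234\n".
static void RenderInputCommand(char *buf, size_t len, bool is_data,
                               const char *module, unsigned long offset) {
  snprintf(buf, len, "%s\"%s\" 0x%lx\n", is_data ? "DATA " : "", module, offset);
}

// llvm-symbolizer terminates each answer with an empty line, i.e. "\n\n".
static bool ReachedEndOfOutput(const char *buf, size_t len) {
  return len >= 2 && buf[len - 1] == '\n' && buf[len - 2] == '\n';
}

int main() {
  char cmd[256];
  RenderInputCommand(cmd, sizeof(cmd), /*is_data=*/false, "/usr/bin/app", 0x4f2aUL);
  fputs(cmd, stdout);
  const char reply[] = "main\n/src/app.cc:42:3\n\n";  // Hypothetical answer.
  printf("reply complete? %s\n",
         ReachedEndOfOutput(reply, strlen(reply)) ? "yes" : "no");
  return 0;
}
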
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
index 446de8af2936..c014c6af42bd 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -12,11 +12,96 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
+#include <windows.h>
+#include <dbghelp.h>
+#pragma comment(lib, "dbghelp.lib")
+
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) { return 0; }
+class WinSymbolizer : public Symbolizer {
+ public:
+ WinSymbolizer() : initialized_(false) {}
+
+ uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
+ if (max_frames == 0)
+ return 0;
+
+ BlockingMutexLock l(&dbghelp_mu_);
+ if (!initialized_) {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS |
+ SYMOPT_UNDNAME |
+ SYMOPT_LOAD_LINES);
+ CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+ initialized_ = true;
+ }
+
+ // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ DWORD64 offset = 0;
+ BOOL got_objname = SymFromAddr(GetCurrentProcess(),
+ (DWORD64)addr, &offset, symbol);
+ if (!got_objname)
+ return 0;
+
+ DWORD unused;
+ IMAGEHLP_LINE64 line_info;
+ line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
+ &unused, &line_info);
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->function = internal_strdup(symbol->Name);
+ info->function_offset = (uptr)offset;
+ if (got_fileline) {
+ info->file = internal_strdup(line_info.FileName);
+ info->line = line_info.LineNumber;
+ }
+
+ IMAGEHLP_MODULE64 mod_info;
+ internal_memset(&mod_info, 0, sizeof(mod_info));
+ mod_info.SizeOfStruct = sizeof(mod_info);
+ if (SymGetModuleInfo64(GetCurrentProcess(), addr, &mod_info))
+ info->FillAddressAndModuleInfo(addr, mod_info.ImageName,
+ addr - (uptr)mod_info.BaseOfImage);
+ return 1;
+ }
+
+ bool CanReturnFileLineInfo() {
+ return true;
+ }
+
+ const char *Demangle(const char *name) {
+ CHECK(initialized_);
+ static char demangle_buffer[1000];
+ if (name[0] == '\01' &&
+ UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),
+ UNDNAME_NAME_ONLY))
+ return demangle_buffer;
+ else
+ return name;
+ }
+
+ // FIXME: Implement GetModuleNameAndOffsetForPC().
+
+ private:
+ // All DbgHelp functions are single threaded, so we should use a mutex to
+ // serialize accesses.
+ BlockingMutex dbghelp_mu_;
+ bool initialized_;
+};
+
+Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
+ static bool called_once = false;
+ CHECK(!called_once && "Shouldn't create more than one symbolizer");
+ called_once = true;
+ return new(symbolizer_allocator_) WinSymbolizer();
+}
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
index 6b2c915a3bc2..7667b753a8b1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
@@ -9,7 +9,17 @@
//
//===----------------------------------------------------------------------===//
-#define internal_syscall syscall
+#if SANITIZER_FREEBSD
+# define SYSCALL(name) SYS_ ## name
+#else
+# define SYSCALL(name) __NR_ ## name
+#endif
+
+#if SANITIZER_FREEBSD && defined(__x86_64__)
+# define internal_syscall __syscall
+# else
+# define internal_syscall syscall
+#endif
bool internal_iserror(uptr retval, int *rverrno) {
if (retval == (uptr)-1) {
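
SYSCALL(name) above is plain token pasting, so one call site resolves to SYS_name on FreeBSD and __NR_name on Linux. A Linux-only illustration of the Linux branch (the FreeBSD branch is analogous):

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>

#define SYSCALL(name) __NR_ ## name  // On FreeBSD this would be SYS_ ## name.

int main() {
  char buf[1];
  long n = syscall(SYSCALL(read), 0, buf, 0);  // Zero-length read from stdin.
  printf("__NR_read = %d, syscall returned %ld\n", SYSCALL(read), n);
  return 0;
}
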
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
index 8810c7faa4bf..b610d66be4ff 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
@@ -9,6 +9,8 @@
//
//===----------------------------------------------------------------------===//
+#define SYSCALL(name) __NR_ ## name
+
static uptr internal_syscall(u64 nr) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
index 666955f6c9a9..0ad47561b8b1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
@@ -15,8 +15,9 @@
namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
- : tid(tid), unique_id(0), os_id(0), user_id(0), status(ThreadStatusInvalid),
- detached(false), reuse_count(0), parent_tid(0), next(0) {
+ : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
+ status(ThreadStatusInvalid),
+ detached(false), parent_tid(0), next(0) {
name[0] = '\0';
}
@@ -76,7 +77,6 @@ void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
void ThreadContextBase::Reset() {
status = ThreadStatusInvalid;
- reuse_count++;
SetName(0);
OnReset();
}
@@ -86,10 +86,11 @@ void ThreadContextBase::Reset() {
const u32 ThreadRegistry::kUnknownTid = ~0U;
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size)
+ u32 thread_quarantine_size, u32 max_reuse)
: context_factory_(factory),
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
+ max_reuse_(max_reuse),
mtx_(),
n_contexts_(0),
total_threads_(0),
@@ -128,8 +129,13 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
+#ifndef SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
+#else
+ Printf("race: limit on %u simultaneously alive goroutines is exceeded,"
+ " dying\n", max_threads_);
+#endif
Die();
}
CHECK_NE(tctx, 0);
@@ -275,6 +281,9 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
dead_threads_.pop_front();
CHECK_EQ(tctx->status, ThreadStatusDead);
tctx->Reset();
+ tctx->reuse_count++;
+ if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)
+ return;
invalid_threads_.push_back(tctx);
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
index 81c270945d35..2d7f9e90e0d8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
@@ -36,13 +36,13 @@ class ThreadContextBase {
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
+ u32 reuse_count; // Number of times this tid was reused.
uptr os_id; // PID (used for reporting).
uptr user_id; // Some opaque user thread id (e.g. pthread_t).
char name[64]; // As annotated by user.
ThreadStatus status;
bool detached;
- int reuse_count;
u32 parent_tid;
ThreadContextBase *next; // For storing thread contexts in a list.
@@ -75,7 +75,7 @@ class ThreadRegistry {
static const u32 kUnknownTid;
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size);
+ u32 thread_quarantine_size, u32 max_reuse = 0);
void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0);
uptr GetMaxAliveThreads();
@@ -117,6 +117,7 @@ class ThreadRegistry {
const ThreadContextFactory context_factory_;
const u32 max_threads_;
const u32 thread_quarantine_size_;
+ const u32 max_reuse_;
BlockingMutex mtx_;
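
The new max_reuse parameter caps how many times a dead thread slot re-enters the reuse list: once reuse_count reaches the cap, QuarantinePush() drops the context instead of recycling it. A reduced model of that policy (the containers below are illustrative, not the registry's own storage):

#include <cstdio>
#include <deque>
#include <vector>

struct Slot {  // Invented stand-in for ThreadContextBase.
  int tid;
  unsigned reuse_count;
};

int main() {
  const unsigned kMaxReuse = 2;
  std::deque<Slot> dead = {{7, 1}, {8, 0}};  // Quarantined (dead) slots.
  std::vector<Slot> reusable;
  while (!dead.empty()) {
    Slot s = dead.front();
    dead.pop_front();
    s.reuse_count++;  // Bumped each time a slot leaves quarantine.
    if (kMaxReuse > 0 && s.reuse_count >= kMaxReuse)
      continue;       // Retired for good: tid 7 is never handed out again.
    reusable.push_back(s);
  }
  printf("%zu slot(s) available for reuse\n", reusable.size());  // Prints 1.
  return 0;
}
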
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
new file mode 100644
index 000000000000..1d6170f9157c
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
@@ -0,0 +1,129 @@
+//===-- sanitizer_tls_get_addr.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_tls_get_addr.h"
+
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_interceptors.h"
+
+namespace __sanitizer {
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+// The actual parameter that comes to __tls_get_addr
+// is a pointer to a struct with two words in it:
+struct TlsGetAddrParam {
+ uptr dso_id;
+ uptr offset;
+};
+
+// Glibc starting from 2.19 allocates TLS using __signal_safe_memalign,
+// which has such a header.
+struct Glibc_2_19_tls_header {
+ uptr size;
+ uptr start;
+};
+
+// This must be static TLS
+__attribute__((tls_model("initial-exec")))
+static __thread DTLS dtls;
+
+// Make sure we properly destroy the DTLS objects:
+// this counter should never get too large.
+static atomic_uintptr_t number_of_live_dtls;
+
+static const uptr kDestroyedThread = -1;
+
+static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
+ if (!size) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+ atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
+}
+
+static inline void DTLS_Resize(uptr new_size) {
+ if (dtls.dtv_size >= new_size) return;
+ new_size = RoundUpToPowerOfTwo(new_size);
+ new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
+ DTLS::DTV *new_dtv =
+ (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+ uptr num_live_dtls =
+ atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
+ VPrintf(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ CHECK_LT(num_live_dtls, 1 << 20);
+ uptr old_dtv_size = dtls.dtv_size;
+ DTLS::DTV *old_dtv = dtls.dtv;
+ if (old_dtv_size)
+ internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
+ dtls.dtv = new_dtv;
+ dtls.dtv_size = new_size;
+ if (old_dtv_size)
+ DTLS_Deallocate(old_dtv, old_dtv_size);
+}
+
+void DTLS_Destroy() {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ uptr s = dtls.dtv_size;
+ dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
+ DTLS_Deallocate(dtls.dtv, s);
+}
+
+void DTLS_on_tls_get_addr(void *arg_void, void *res) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
+ uptr dso_id = arg->dso_id;
+ if (dtls.dtv_size == kDestroyedThread) return;
+ DTLS_Resize(dso_id + 1);
+ if (dtls.dtv[dso_id].beg)
+ return;
+ uptr tls_size = 0;
+ uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset;
+ VPrintf(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ "num_live_dtls %zd\n",
+ arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ atomic_load(&number_of_live_dtls, memory_order_relaxed));
+ if (dtls.last_memalign_ptr == tls_beg) {
+ tls_size = dtls.last_memalign_size;
+ VPrintf(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ tls_beg, tls_size);
+ } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
+ // We may want to check gnu_get_libc_version().
+ Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
+ tls_size = header->size;
+ tls_beg = header->start;
+ VPrintf(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ tls_beg, tls_size);
+ } else {
+ VPrintf(2, "__tls_get_addr: Can't guess glibc version\n");
+    // This may happen inside the DTOR of the main thread, so just ignore it.
+ tls_size = 0;
+ }
+ dtls.dtv[dso_id].beg = tls_beg;
+ dtls.dtv[dso_id].size = tls_size;
+}
+
+void DTLS_on_libc_memalign(void *ptr, uptr size) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
+ dtls.last_memalign_size = size;
+}
+
+DTLS *DTLS_Get() { return &dtls; }
+
+#else
+void DTLS_on_libc_memalign(void *ptr, uptr size) {}
+void DTLS_on_tls_get_addr(void *arg, void *res) {}
+DTLS *DTLS_Get() { return 0; }
+void DTLS_Destroy() {}
+#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+} // namespace __sanitizer
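
The glibc >= 2.19 branch of DTLS_on_tls_get_addr() above relies on the chunk starting sizeof(Glibc_2_19_tls_header) bytes into a page, so the {size, start} header can be read from just before it. A synthetic, POSIX-only demo of that address arithmetic (no real glibc internals involved):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Glibc_2_19_tls_header {
  uintptr_t size;
  uintptr_t start;
};

int main() {
  // Simulate an aligned chunk whose payload begins right after the header.
  void *page = nullptr;
  if (posix_memalign(&page, 4096, 4096) != 0) return 1;
  Glibc_2_19_tls_header *written = (Glibc_2_19_tls_header *)page;
  written->size = 256;
  written->start = (uintptr_t)page + sizeof(*written);
  uintptr_t tls_beg = written->start;  // What __tls_get_addr would hand back.

  // The detection step mirrored from DTLS_on_tls_get_addr():
  if (tls_beg % 4096 == sizeof(Glibc_2_19_tls_header)) {
    Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
    printf("guessed TLS chunk: start=%p size=%zu\n",
           (void *)header->start, (size_t)header->size);
  }
  free(page);
  return 0;
}
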
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
new file mode 100644
index 000000000000..22a5e1c41c21
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+// All this magic is specific to glibc and is required to work around
+// the lack of an interface that would tell us about the Dynamic TLS (DTLS).
+// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
+//
+// Matters get worse because the glibc implementation changed between
+// 2.18 and 2.19:
+// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
+//
+// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// which we intercept and thus know where the DTLS is.
+// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
+// which is an internal function that wraps a mmap call, neither of which
+// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
+// header which we can use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TLS_GET_ADDR_H
+#define SANITIZER_TLS_GET_ADDR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+struct DTLS {
+ // Array of DTLS chunks for the current Thread.
+ // If beg == 0, the chunk is unused.
+ struct DTV {
+ uptr beg, size;
+ };
+
+ uptr dtv_size;
+ DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+
+ // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cc
+ uptr last_memalign_size;
+ uptr last_memalign_ptr;
+};
+
+void DTLS_on_tls_get_addr(void *arg, void *res);
+void DTLS_on_libc_memalign(void *ptr, uptr size);
+DTLS *DTLS_Get();
+void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_TLS_GET_ADDR_H
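A hedged usage sketch (not part of the patch) of the intended call sequence for this interface: feed it from the memalign and __tls_get_addr interceptors, walk the per-thread DTV when scanning memory, and tear it down before the thread exits. The wrapper names below are illustrative; only the __sanitizer functions declared above are real.

  #include "sanitizer_tls_get_addr.h"  // assumed include path

  using namespace __sanitizer;

  // Hypothetical hooks where a tool's interceptors would notify the DTLS
  // bookkeeping (names are illustrative, not part of the interface above).
  void OnLibcMemalign(void *p, uptr size) { DTLS_on_libc_memalign(p, size); }
  void OnTlsGetAddr(void *arg, void *res) { DTLS_on_tls_get_addr(arg, res); }

  // Example consumer: visit every known dynamic TLS chunk of the calling
  // thread, e.g. to treat the chunks as extra roots when scanning memory.
  void ForEachDtlsChunk(void (*visit)(uptr beg, uptr size)) {
    DTLS *dtls = DTLS_Get();
    if (!dtls || dtls->dtv_size == (uptr)-1)  // null, or kDestroyedThread
      return;
    for (uptr i = 0; i < dtls->dtv_size; i++)
      if (dtls->dtv[i].beg)
        visit(dtls->dtv[i].beg, dtls->dtv[i].size);
  }

  // On thread teardown, before the thread's state goes away:
  void OnThreadFinish() { DTLS_Destroy(); }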
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index c48274e36426..6065838cefe3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -78,8 +78,9 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
void *MmapOrDie(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0) {
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
- size, size, mem_type);
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, size, size, mem_type, GetLastError());
CHECK("unable to mmap" && 0);
}
return rv;
@@ -87,8 +88,9 @@ void *MmapOrDie(uptr size, const char *mem_type) {
void UnmapOrDie(void *addr, uptr size) {
if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
- Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- size, size, addr);
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
CHECK("unable to unmap" && 0);
}
}
@@ -99,8 +101,9 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (p == 0)
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
- size, size, fixed_addr, GetLastError());
+ Report("ERROR: %s failed to "
+ "allocate %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
return p;
}
@@ -108,6 +111,11 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return MmapFixedNoReserve(fixed_addr, size);
}
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ // FIXME: make this really NoReserve?
+ return MmapOrDie(size, mem_type);
+}
+
void *Mprotect(uptr fixed_addr, uptr size) {
return VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
@@ -175,14 +183,15 @@ void DumpProcessMap() {
}
void DisableCoreDumper() {
- UNIMPLEMENTED();
+ // Do nothing.
}
void ReExec() {
UNIMPLEMENTED();
}
-void PrepareForSandboxing() {
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ (void)args;
// Nothing here for now.
}
@@ -264,13 +273,48 @@ uptr internal_read(fd_t fd, void *buf, uptr count) {
uptr internal_write(fd_t fd, const void *buf, uptr count) {
if (fd != kStderrFd)
UNIMPLEMENTED();
- HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
- if (err == 0)
- return 0; // FIXME: this might not work on some apps.
- DWORD ret;
- if (!WriteFile(err, buf, count, &ret, 0))
+
+ static HANDLE output_stream = 0;
+ // Abort immediately if we know printing is not possible.
+ if (output_stream == INVALID_HANDLE_VALUE)
return 0;
- return ret;
+
+ // If called for the first time, try to use stderr to output stuff,
+ // falling back to stdout if anything goes wrong.
+ bool fallback_to_stdout = false;
+ if (output_stream == 0) {
+ output_stream = GetStdHandle(STD_ERROR_HANDLE);
+ // We don't distinguish "no such handle" from error.
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+
+ if (output_stream == INVALID_HANDLE_VALUE) {
+ // Retry with stdout?
+ output_stream = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+ if (output_stream == INVALID_HANDLE_VALUE)
+ return 0;
+ } else {
+ // Successfully got an stderr handle. However, if WriteFile() fails,
+ // we can still try to fall back to stdout.
+ fallback_to_stdout = true;
+ }
+ }
+
+ DWORD ret;
+ if (WriteFile(output_stream, buf, count, &ret, 0))
+ return ret;
+
+ // Re-try with stdout if using a valid stderr handle fails.
+ if (fallback_to_stdout) {
+ output_stream = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+ if (output_stream != INVALID_HANDLE_VALUE)
+ return internal_write(fd, buf, count);
+ }
+ return 0;
}
uptr internal_stat(const char *path, void *buf) {
@@ -375,16 +419,25 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
(void**)trace, 0);
+ if (size == 0)
+ return;
+
// Skip the RTL frames by searching for the PC in the stacktrace.
uptr pc_location = LocatePcInTrace(pc);
PopStackFrames(pc_location);
}
+void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth) {
+ UNREACHABLE("no signal context on windows");
+}
+
void MaybeOpenReportFile() {
// Windows doesn't have native fork, and we don't support Cygwin or other
// environments that try to fake it, so the initial report_fd will always be
@@ -392,8 +445,6 @@ void MaybeOpenReportFile() {
}
void RawWrite(const char *buffer) {
- static const char *kRawWriteError =
- "RawWrite can't output requested buffer!\n";
uptr length = (uptr)internal_strlen(buffer);
if (length != internal_write(report_fd, buffer, length)) {
// stderr may be closed, but we may be able to print to the debugger
@@ -403,6 +454,24 @@ void RawWrite(const char *buffer) {
}
}
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ (void)handler;
+ // FIXME: Decide what to do on Windows.
+}
+
+bool IsDeadlySignal(int signum) {
+ // FIXME: Decide what to do on Windows.
+ return false;
+}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
index 39ed2529c2c7..5d98e215e057 100644
--- a/libsanitizer/tsan/Makefile.am
+++ b/libsanitizer/tsan/Makefile.am
@@ -11,32 +11,32 @@ ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc \
+ tsan_rtl_amd64.S
libtsan_la_SOURCES = $(tsan_files)
libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
index 01c27b9b7d46..068aaa87fe82 100644
--- a/libsanitizer/tsan/Makefile.in
+++ b/libsanitizer/tsan/Makefile.in
@@ -84,15 +84,15 @@ libtsan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
$(am__append_1) $(am__DEPENDENCIES_1)
-am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
- tsan_report.lo tsan_rtl_thread.lo tsan_symbolize.lo \
- tsan_flags.lo tsan_interface.lo tsan_platform_linux.lo \
- tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_ignoreset.lo \
- tsan_interceptors.lo tsan_md5.lo tsan_platform_mac.lo \
- tsan_rtl_mutex.lo tsan_suppressions.lo tsan_interface_ann.lo \
- tsan_mman.lo tsan_rtl_report.lo tsan_fd.lo \
- tsan_interface_java.lo tsan_mutexset.lo \
- tsan_symbolize_addr2line_linux.lo tsan_rtl_amd64.lo
+am__objects_1 = tsan_clock.lo tsan_fd.lo tsan_flags.lo \
+ tsan_ignoreset.lo tsan_interceptors.lo tsan_interface_ann.lo \
+ tsan_interface_atomic.lo tsan_interface.lo \
+ tsan_interface_java.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \
+ tsan_mutexset.lo tsan_platform_linux.lo tsan_platform_mac.lo \
+ tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
+ tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \
+ tsan_stat.lo tsan_suppressions.lo tsan_symbolize.lo \
+ tsan_sync.lo tsan_rtl_amd64.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -280,32 +280,32 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc \
+ tsan_rtl_amd64.S
libtsan_la_SOURCES = $(tsan_files)
libtsan_la_LIBADD = \
@@ -442,6 +442,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_windows.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
@@ -451,7 +452,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize_addr2line_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_sync.Plo@am__quote@
.S.o:
diff --git a/libsanitizer/tsan/tsan_clock.cc b/libsanitizer/tsan/tsan_clock.cc
index 5d45a5d15fb4..b944cc50d54f 100644
--- a/libsanitizer/tsan/tsan_clock.cc
+++ b/libsanitizer/tsan/tsan_clock.cc
@@ -11,99 +11,338 @@
#include "tsan_clock.h"
#include "tsan_rtl.h"
-// It's possible to optimize clock operations for some important cases
-// so that they are O(1). The cases include singletons, once's, local mutexes.
-// First, SyncClock must be re-implemented to allow indexing by tid.
-// It must not necessarily be a full vector clock, though. For example it may
-// be a multi-level table.
-// Then, each slot in SyncClock must contain a dirty bit (it's united with
-// the clock value, so no space increase). The acquire algorithm looks
-// as follows:
-// void acquire(thr, tid, thr_clock, sync_clock) {
-// if (!sync_clock[tid].dirty)
-// return; // No new info to acquire.
-// // This handles constant reads of singleton pointers and
-// // stop-flags.
-// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
-// sync_clock[tid].dirty = false;
-// sync_clock.dirty_count--;
-// }
-// The release operation looks as follows:
-// void release(thr, tid, thr_clock, sync_clock) {
-// // thr->sync_cache is a simple fixed-size hash-based cache that holds
-// // several previous sync_clock's.
-// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
-// // The thread did no acquire operations since last release on this clock.
-// // So update only the thread's slot (other slots can't possibly change).
-// sync_clock[tid].clock = thr->epoch;
-// if (sync_clock.dirty_count == sync_clock.cnt
-// || (sync_clock.dirty_count == sync_clock.cnt - 1
-// && sync_clock[tid].dirty == false))
-// // All dirty flags are set, bail out.
-// return;
-// set all dirty bits, but preserve the thread's bit. // O(N)
-// update sync_clock.dirty_count;
-// return;
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
+// ThreadClock contains a fixed-size vector clock, sized for the maximum
+// number of threads. SyncClock contains a growable vector clock, sized for
+// the number of threads currently needed.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
+// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
// }
-// release_impl(thr_clock, sync_clock); // As usual, O(N).
-// set all dirty bits, but preserve the thread's bit.
-// // The previous step is combined with release_impl(), so that
-// // we scan the arrays only once.
-// update sync_clock.dirty_count;
-// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The extra
+// complexity makes it possible to handle important classes of use cases in
+// O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is
+// in a read-only phase, these reads will be O(1); if it later switches to a
+// read/write phase, the implementation will correctly handle that by
+// switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of ThreadClock state:
+// clk_ - fixed size vector clock.
+// nclk_ - effective size of the vector clock (the rest is zeros).
+// tid_ - index of the thread associated with the clock ("current thread").
+// last_acquire_ - current thread time when it acquired something from
+// other threads.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold timestamp,
+// the remaining bits hold "acquired" flag (the actual value is thread's
+// reused counter);
+// if acquired == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly dirty_tids_).
+// dirty_tids_ - holds up to two indices in the vector clock that other threads
+// need to acquire regardless of "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of
+// release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#ifndef TSAN_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
namespace __tsan {
-ThreadClock::ThreadClock() {
- nclk_ = 0;
- for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
- clk_[i] = 0;
+const unsigned kInvalidTid = (unsigned)-1;
+
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) { // 0 has special meaning
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ last_acquire_ = 0;
+ internal_memset(clk_, 0, sizeof(clk_));
+ clk_[tid_].reused = reused_;
}
void ThreadClock::acquire(const SyncClock *src) {
DCHECK(nclk_ <= kMaxTid);
DCHECK(src->clk_.Size() <= kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
+ // Check if it's empty -> no need to do anything.
const uptr nclk = src->clk_.Size();
- if (nclk == 0)
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
return;
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ bool acquired = false;
+ if (nclk > tid_) {
+ CPP_STAT_INC(StatClockAcquireLarge);
+ if (src->clk_[tid_].reused == reused_) {
+ CPP_STAT_INC(StatClockAcquireRepeat);
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ u64 epoch = src->clk_[tid].epoch;
+ if (clk_[tid].epoch < epoch) {
+ clk_[tid].epoch = epoch;
+ acquired = true;
+ }
+ }
+ }
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
+ }
+ return;
+ }
+ }
+
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
- if (clk_[i] < src->clk_[i])
- clk_[i] = src->clk_[i];
+ u64 epoch = src->clk_[i].epoch;
+ if (clk_[i].epoch < epoch) {
+ clk_[i].epoch = epoch;
+ acquired = true;
+ }
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->clk_[tid_].reused = reused_;
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
}
}
void ThreadClock::release(SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->clk_.Size(), kMaxTid);
- if (dst->clk_.Size() < nclk_)
+ if (dst->clk_.Size() == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->clk_.Size() < nclk_) {
+ CPP_STAT_INC(StatClockReleaseResize);
dst->clk_.Resize(nclk_);
+ }
+
+ // Check whether we have acquired anything from other threads
+ // since the last release on dst. If not, we need to update
+ // only dst->clk_[tid_].
+ if (dst->clk_[tid_].epoch > last_acquire_) {
+ UpdateCurrentThread(dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
+
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
- if (dst->clk_[i] < clk_[i])
- dst->clk_[i] = clk_[i];
+ dst->clk_[i].epoch = max(dst->clk_[i].epoch, clk_[i].epoch);
+ dst->clk_[i].reused = 0;
}
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->clk_.Size())
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ for (uptr i = nclk_; i < dst->clk_.Size(); i++)
+ dst->clk_[i].reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->clk_[tid_].reused = reused_;
}
void ThreadClock::ReleaseStore(SyncClock *dst) const {
DCHECK(nclk_ <= kMaxTid);
DCHECK(dst->clk_.Size() <= kMaxTid);
+ CPP_STAT_INC(StatClockStore);
- if (dst->clk_.Size() < nclk_)
+ // Check if we need to resize dst.
+ if (dst->clk_.Size() < nclk_) {
+ CPP_STAT_INC(StatClockStoreResize);
dst->clk_.Resize(nclk_);
- for (uptr i = 0; i < nclk_; i++)
- dst->clk_[i] = clk_[i];
- for (uptr i = nclk_; i < dst->clk_.Size(); i++)
- dst->clk_[i] = 0;
+ }
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ dst->clk_[tid_].epoch > last_acquire_) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ for (uptr i = 0; i < nclk_; i++) {
+ dst->clk_[i].epoch = clk_[i].epoch;
+ dst->clk_[i].reused = 0;
+ }
+ // Clear the tail of dst->clk_.
+ if (nclk_ < dst->clk_.Size()) {
+ internal_memset(&dst->clk_[nclk_], 0,
+ (dst->clk_.Size() - nclk_) * sizeof(dst->clk_[0]));
+ CPP_STAT_INC(StatClockStoreTail);
+ }
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->clk_[tid_].reused = reused_;
}
void ThreadClock::acq_rel(SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
acquire(dst);
- release(dst);
+ ReleaseStore(dst);
+}
+
+// Updates only the single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+ // Update the thread's time, but preserve the 'acquired' flag.
+ dst->clk_[tid_].epoch = clk_[tid_].epoch;
+
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dst->dirty_tids_[i] == tid_) {
+ CPP_STAT_INC(StatClockReleaseFast1);
+ return;
+ }
+ if (dst->dirty_tids_[i] == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast2);
+ dst->dirty_tids_[i] = tid_;
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ CPP_STAT_INC(StatClockReleaseSlow);
+ for (uptr i = 0; i < dst->clk_.Size(); i++) {
+ dst->clk_[i].reused = 0;
+ }
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->clk_[tid_].reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ if (clk_[tid].epoch < src->clk_[tid].epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid].epoch);
+ clk_[tid].epoch = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_].epoch;
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ printf("] tid=%u/%u last_acq=%llu",
+ tid_, reused_, last_acquire_);
}
SyncClock::SyncClock()
- : clk_(MBlockClock) {
+ : clk_(MBlockClock) {
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+void SyncClock::Reset() {
+ clk_.Reset();
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < clk_.Size(); i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < clk_.Size(); i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
+ release_store_tid_, release_store_reused_,
+ dirty_tids_[0], dirty_tids_[1]);
}
} // namespace __tsan
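The long comment at the top of this file defines the reference semantics the optimized clocks must preserve. A standalone sketch of that naive model (plain O(N) loops, no reuse counters or dirty-tid fast paths), useful only to make the acquire/release/release-store contract concrete:

  #include <algorithm>
  #include <cstdint>

  constexpr int kThreads = 4;  // small bound, just for illustration

  struct NaiveSyncClock { uint64_t clk[kThreads] = {}; };

  struct NaiveThreadClock {
    uint64_t clk[kThreads] = {};
    int tid;
    explicit NaiveThreadClock(int t) : tid(t) {}

    void tick() { clk[tid]++; }  // local event on this thread

    // acquire: pull everything the sync object has seen.
    void acquire(const NaiveSyncClock &src) {
      for (int i = 0; i < kThreads; i++)
        clk[i] = std::max(clk[i], src.clk[i]);
    }
    // release: publish everything this thread has seen.
    void release(NaiveSyncClock *dst) const {
      for (int i = 0; i < kThreads; i++)
        dst->clk[i] = std::max(dst->clk[i], clk[i]);
    }
    // release-store: overwrite the sync object with this thread's view.
    void release_store(NaiveSyncClock *dst) const {
      for (int i = 0; i < kThreads; i++)
        dst->clk[i] = clk[i];
    }
    void acq_rel(NaiveSyncClock *dst) { acquire(*dst); release(dst); }
  };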
diff --git a/libsanitizer/tsan/tsan_clock.h b/libsanitizer/tsan/tsan_clock.h
index 8e4bf99ca896..2ce480b2d163 100644
--- a/libsanitizer/tsan/tsan_clock.h
+++ b/libsanitizer/tsan/tsan_clock.h
@@ -16,6 +16,11 @@
namespace __tsan {
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits;
+};
+
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
@@ -25,38 +30,43 @@ class SyncClock {
return clk_.Size();
}
- void Reset() {
- clk_.Reset();
+ u64 get(unsigned tid) const {
+ DCHECK_LT(tid, clk_.Size());
+ return clk_[tid].epoch;
}
+ void Reset();
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
private:
- Vector<u64> clk_;
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ static const uptr kDirtyTids = 2;
+ unsigned dirty_tids_[kDirtyTids];
+ mutable Vector<ClockElem> clk_;
friend struct ThreadClock;
};
// The clock that lives in threads.
struct ThreadClock {
public:
- ThreadClock();
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
+ return clk_[tid].epoch;
}
- void set(unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void set(unsigned tid, u64 v);
+
+ void set(u64 v) {
+ DCHECK_GE(v, clk_[tid_].epoch);
+ clk_[tid_].epoch = v;
}
- void tick(unsigned tid) {
- DCHECK_LT(tid, kMaxTid);
- clk_[tid]++;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void tick() {
+ clk_[tid_].epoch++;
}
uptr size() const {
@@ -68,9 +78,19 @@ struct ThreadClock {
void acq_rel(SyncClock *dst);
void ReleaseStore(SyncClock *dst) const;
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
+
private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ const unsigned tid_;
+ const unsigned reused_;
+ u64 last_acquire_;
uptr nclk_;
- u64 clk_[kMaxTidInClock];
+ ClockElem clk_[kMaxTidInClock];
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ void UpdateCurrentThread(SyncClock *dst) const;
};
} // namespace __tsan
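ClockElem packs a kClkBits-wide epoch and a (64 - kClkBits)-wide reuse count into a single 64-bit word; with kClkBits = 42 (see tsan_defs.h below) that leaves 22 bits for reuse, i.e. kMaxTidReuse = 2^22 - 1 = 4194303. A small standalone example of the same packing, with the arithmetic spelled out:

  #include <cstdint>
  #include <cstdio>

  constexpr unsigned kClkBits = 42;

  struct Elem {
    uint64_t epoch  : kClkBits;        // up to 2^42 - 1 events per thread
    uint64_t reused : 64 - kClkBits;   // up to 2^22 - 1 = 4194303 reuses
  };
  // Holds on the usual LP64 ABIs: both bitfields share one 64-bit word.
  static_assert(sizeof(Elem) == 8, "one element stays a single 64-bit word");

  int main() {
    Elem e{};
    e.epoch  = (1ull << kClkBits) - 1;        // 4398046511103, largest epoch
    e.reused = (1u << (64 - kClkBits)) - 1;   // 4194303, matches kMaxTidReuse
    std::printf("epoch=%llu reused=%llu\n",
                (unsigned long long)e.epoch, (unsigned long long)e.reused);
  }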
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index 3f20797ff4bc..cc65ae8b4a30 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -39,6 +39,7 @@ const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
const uptr kTraceStackSize = 256;
@@ -63,6 +64,12 @@ const uptr kShadowSize = 8;
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
const bool kCollectStats = true;
#else
diff --git a/libsanitizer/tsan/tsan_fd.cc b/libsanitizer/tsan/tsan_fd.cc
index b7ac3111c89b..7d62ae4c5ff3 100644
--- a/libsanitizer/tsan/tsan_fd.cc
+++ b/libsanitizer/tsan/tsan_fd.cc
@@ -63,7 +63,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
+ SyncVar *v = ctx->synctab.GetAndRemove(thr, pc, (uptr)s);
if (v)
DestroyAndFree(v);
internal_free(s);
@@ -283,13 +283,13 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, &fdctx.socksync);
}
-uptr File2addr(char *path) {
+uptr File2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
-uptr Dir2addr(char *path) {
+uptr Dir2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h
index 3306873223e7..57ae62cfe8ba 100644
--- a/libsanitizer/tsan/tsan_fd.h
+++ b/libsanitizer/tsan/tsan_fd.h
@@ -55,8 +55,8 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
-uptr File2addr(char *path);
-uptr Dir2addr(char *path);
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index 158e24f82412..e241cb6cda47 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -18,17 +18,13 @@
namespace __tsan {
Flags *flags() {
- return &CTX()->flags;
+ return &ctx->flags;
}
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-void OverrideFlags(Flags *f);
extern "C" const char* __tsan_default_options();
#else
-void WEAK OverrideFlags(Flags *f) {
- (void)f;
-}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
const char *WEAK __tsan_default_options() {
return "";
@@ -36,30 +32,35 @@ const char *WEAK __tsan_default_options() {
#endif
static void ParseFlags(Flags *f, const char *env) {
- ParseFlag(env, &f->enable_annotations, "enable_annotations");
- ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
- ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
- ParseFlag(env, &f->suppress_java, "suppress_java");
- ParseFlag(env, &f->report_bugs, "report_bugs");
- ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
- ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
- ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
- ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
- ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
- ParseFlag(env, &f->suppressions, "suppressions");
- ParseFlag(env, &f->print_suppressions, "print_suppressions");
- ParseFlag(env, &f->print_benign, "print_benign");
- ParseFlag(env, &f->exitcode, "exitcode");
- ParseFlag(env, &f->halt_on_error, "halt_on_error");
- ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
- ParseFlag(env, &f->profile_memory, "profile_memory");
- ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
- ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
- ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
- ParseFlag(env, &f->stop_on_start, "stop_on_start");
- ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind");
- ParseFlag(env, &f->history_size, "history_size");
- ParseFlag(env, &f->io_sync, "io_sync");
+ ParseFlag(env, &f->enable_annotations, "enable_annotations", "");
+ ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", "");
+ ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", "");
+ ParseFlag(env, &f->suppress_java, "suppress_java", "");
+ ParseFlag(env, &f->report_bugs, "report_bugs", "");
+ ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", "");
+ ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", "");
+ ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", "");
+ ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", "");
+ ParseFlag(env, &f->report_atomic_races, "report_atomic_races", "");
+ ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", "");
+ ParseFlag(env, &f->suppressions, "suppressions", "");
+ ParseFlag(env, &f->print_suppressions, "print_suppressions", "");
+ ParseFlag(env, &f->print_benign, "print_benign", "");
+ ParseFlag(env, &f->exitcode, "exitcode", "");
+ ParseFlag(env, &f->halt_on_error, "halt_on_error", "");
+ ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", "");
+ ParseFlag(env, &f->profile_memory, "profile_memory", "");
+ ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", "");
+ ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", "");
+ ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", "");
+ ParseFlag(env, &f->stop_on_start, "stop_on_start", "");
+ ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind", "");
+ ParseFlag(env, &f->history_size, "history_size", "");
+ ParseFlag(env, &f->io_sync, "io_sync", "");
+ ParseFlag(env, &f->die_after_fork, "die_after_fork", "");
+
+ // DDFlags
+ ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", "");
}
void InitializeFlags(Flags *f, const char *env) {
@@ -73,6 +74,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_bugs = true;
f->report_thread_leaks = true;
f->report_destroy_locked = true;
+ f->report_mutex_bugs = true;
f->report_signal_unsafe = true;
f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
@@ -90,11 +92,16 @@ void InitializeFlags(Flags *f, const char *env) {
f->running_on_valgrind = false;
f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
f->io_sync = 1;
+ f->die_after_fork = true;
+
+ // DDFlags
+ f->second_deadlock_stack = false;
SetCommonFlagsDefaults(f);
+ // Override some common flags defaults.
+ f->allow_addr2line = true;
// Let a frontend override.
- OverrideFlags(f);
ParseFlags(f, __tsan_default_options());
ParseCommonFlagsFromString(f, __tsan_default_options());
// Override from command line.
@@ -111,6 +118,8 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
+ if (f->help) PrintFlagDescriptions();
+
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
" (must be [0..7])\n");
diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h
index 05d11a451c69..4bf459d59817 100644
--- a/libsanitizer/tsan/tsan_flags.h
+++ b/libsanitizer/tsan/tsan_flags.h
@@ -12,23 +12,18 @@
#ifndef TSAN_FLAGS_H
#define TSAN_FLAGS_H
-// ----------- ATTENTION -------------
-// ThreadSanitizer user may provide its implementation of weak
-// symbol __tsan::OverrideFlags(__tsan::Flags). Therefore, this
-// header may be included in the user code, and shouldn't include
-// other headers from TSan or common sanitizer runtime.
-
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
namespace __tsan {
-struct Flags : CommonFlags {
+struct Flags : CommonFlags, DDFlags {
// Enable dynamic annotations, otherwise they are no-ops.
bool enable_annotations;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// with the same stack.
bool suppress_equal_stacks;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// on the same address.
bool suppress_equal_addresses;
// Suppress weird race reports that can be seen if JVM is embed
@@ -40,6 +35,8 @@ struct Flags : CommonFlags {
bool report_thread_leaks;
// Report destruction of a locked mutex?
bool report_destroy_locked;
+ // Report incorrect usages of mutexes and mutex annotations?
+ bool report_mutex_bugs;
// Report violations of async signal-safety
// (e.g. malloc() call from a signal handler).
bool report_signal_unsafe;
@@ -85,6 +82,8 @@ struct Flags : CommonFlags {
// 1 - reasonable level of synchronization (write->read)
// 2 - global synchronization of all IO operations
int io_sync;
+ // Die after multi-threaded fork if the child creates new threads.
+ bool die_after_fork;
};
Flags *flags();
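These flags are normally supplied through the TSAN_OPTIONS environment variable as name=value pairs, e.g. TSAN_OPTIONS="history_size=3 die_after_fork=0", and are consumed by the ParseFlag calls shown in tsan_flags.cc. A toy sketch of the general idea behind that parsing (the real helper in sanitizer_common handles more types, separators, and quoting; this is only an illustration):

  #include <cstdlib>
  #include <cstring>

  // Picks an integer "name=value" pair out of an options string.
  // Deliberately naive: substring search, no quoting, no type dispatch.
  static bool ParseIntFlag(const char *env, const char *name, int *out) {
    if (!env) return false;
    const char *p = std::strstr(env, name);
    if (!p || p[std::strlen(name)] != '=') return false;
    *out = std::atoi(p + std::strlen(name) + 1);
    return true;
  }

  // Usage:
  //   int history_size = 2;  // default set in InitializeFlags
  //   ParseIntFlag(std::getenv("TSAN_OPTIONS"), "history_size", &history_size);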
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 0574beb91df3..19a3b7b0643a 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -27,7 +27,7 @@
using namespace __tsan; // NOLINT
-const int kSigCount = 64;
+const int kSigCount = 65;
struct my_siginfo_t {
// The size is determined by looking at sizeof of real siginfo_t on linux.
@@ -51,6 +51,7 @@ extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
// REAL(sigfillset) defined in common interceptors.
DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
@@ -60,6 +61,7 @@ extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
extern "C" int mallopt(int param, int value);
+extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int EINVAL = 22;
@@ -71,6 +73,7 @@ const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
+const int SIGTERM = 15;
const int SIGBUS = 7;
const int SIGSYS = 31;
void *const MAP_FAILED = (void*)-1;
@@ -142,7 +145,6 @@ void InitializeLibIgnore() {
static SignalContext *SigCtx(ThreadState *thr) {
SignalContext *ctx = (SignalContext*)thr->signal_ctx;
if (ctx == 0 && thr->is_alive) {
- ScopedInRtl in_rtl;
ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
thr->signal_ctx = ctx;
@@ -159,7 +161,6 @@ class ScopedInterceptor {
private:
ThreadState *const thr_;
const uptr pc_;
- const int in_rtl_;
bool in_ignored_lib_;
};
@@ -167,16 +168,12 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
, pc_(pc)
- , in_rtl_(thr->in_rtl)
, in_ignored_lib_(false) {
- if (thr_->in_rtl == 0) {
+ if (!thr_->ignore_interceptors) {
Initialize(thr);
FuncEntry(thr, pc);
- thr_->in_rtl++;
- DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
- } else {
- thr_->in_rtl++;
}
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
@@ -189,18 +186,14 @@ ScopedInterceptor::~ScopedInterceptor() {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
}
- thr_->in_rtl--;
- if (thr_->in_rtl == 0) {
- FuncExit(thr_);
+ if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
+ FuncExit(thr_);
}
- CHECK_EQ(in_rtl_, thr_->in_rtl);
}
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
ThreadState *thr = cur_thread(); \
- StatInc(thr, StatInterceptor); \
- StatInc(thr, StatInt_##func); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
@@ -210,15 +203,16 @@ ScopedInterceptor::~ScopedInterceptor() {
#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
if (REAL(func) == 0) { \
- Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
} \
- if (thr->in_rtl > 1 || thr->in_ignored_lib) \
+ if (thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
@@ -233,6 +227,13 @@ struct BlockingCall {
}
SignalContext *ctx;
+
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ ScopedIgnoreInterceptors ignore_interceptors;
};
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
@@ -258,22 +259,14 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
- // dlopen will execute global constructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
void *res = REAL(dlopen)(filename, flag);
- thr->in_rtl = 1;
libignore()->OnLibraryLoaded(filename);
return res;
}
TSAN_INTERCEPTOR(int, dlclose, void *handle) {
SCOPED_INTERCEPTOR_RAW(dlclose, handle);
- // dlclose will execute global destructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
int res = REAL(dlclose)(handle);
- thr->in_rtl = 1;
libignore()->OnLibraryUnloaded();
return res;
}
@@ -301,7 +294,6 @@ class AtExitContext {
}
void exit(ThreadState *thr, uptr pc) {
- CHECK_EQ(thr->in_rtl, 0);
for (;;) {
atexit_t f = 0;
void *arg = 0;
@@ -313,14 +305,12 @@ class AtExitContext {
f = stack_[pos_];
arg = args_[pos_];
is_on_exit = is_on_exits_[pos_];
- ScopedInRtl in_rtl;
Acquire(thr, pc, (uptr)this);
}
}
if (f == 0)
break;
DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
- CHECK_EQ(thr->in_rtl, 0);
if (is_on_exit)
((void(*)(int status, void *arg))f)(0, arg);
else
@@ -342,7 +332,9 @@ static AtExitContext *atexit_ctx;
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(atexit, f);
+ // We want to set up the atexit callback even if we are in an ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
}
@@ -413,7 +405,6 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
- ScopedInRtl in_rtl;
SetJmp(cur_thread(), sp, mangled_sp);
}
@@ -587,21 +578,21 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
user_free(thr, pc, ptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete(void *ptr);
-void operator delete(void *ptr) {
+void operator delete(void *ptr) throw();
+void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(_ZdlPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete[](void *ptr);
-void operator delete[](void *ptr) {
- OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+void operator delete[](void *ptr) throw();
+void operator delete[](void *ptr) throw() {
+ OPERATOR_DELETE_BODY(_ZdaPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&);
void operator delete(void *ptr, std::nothrow_t const&) {
- OPERATOR_DELETE_BODY(_ZdaPv);
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -643,20 +634,6 @@ TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
return res;
}
-TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
- void *res = REAL(memchr)(s, c, n);
- uptr len = res ? (char*)res - (char*)s + 1 : n;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
- return res;
-}
-
-TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
- MemoryAccessRange(thr, pc, (uptr)s, n, false);
- return REAL(memrchr)(s, c, n);
-}
-
TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
MemoryAccessRange(thr, pc, (uptr)dst, n, true);
@@ -827,7 +804,6 @@ static void thread_finalize(void *v) {
return;
}
{
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
ThreadFinish(thr);
SignalContext *sctx = thr->signal_ctx;
@@ -852,7 +828,8 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
int tid = 0;
{
ThreadState *thr = cur_thread();
- ScopedInRtl in_rtl;
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
if (pthread_setspecific(g_thread_finalize_key,
(void *)kPthreadDestructorIterations)) {
Printf("ThreadSanitizer: failed to set thread key\n");
@@ -862,7 +839,6 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
pthread_yield();
atomic_store(&p->tid, 0, memory_order_release);
ThreadStart(thr, tid, GetTid());
- CHECK_EQ(thr->in_rtl, 1);
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -875,6 +851,17 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
@@ -882,13 +869,20 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
int detached = 0;
REAL(pthread_attr_getdetachstate)(attr, &detached);
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
- int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
if (res == 0) {
int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
CHECK_NE(tid, 0);
@@ -904,7 +898,9 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
@@ -921,6 +917,122 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
return res;
}
+// Problem:
+// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has a different size in the different versions.
+// If we call the new REAL functions for an old pthread_cond_t, they will
+// corrupt memory after the pthread_cond_t (the old cond is smaller).
+// If we call the old REAL functions for a new pthread_cond_t, we will lose
+// some functionality (e.g. the old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require having 2 versions of the interceptors as well,
+// but this is messy; in particular, it requires linker scripts when the
+// sanitizer runtime is linked into a shared library.
+// Instead we assume there are no dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
+// that allows working with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side, large enough to hold
+ // any pthread_cond_t object, always call the new REAL functions, and pass
+ // the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes the
+ // first word of pthread_cond_t to zero.
+ // All of this is relevant only for Linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
+
+struct CondMutexUnlockCtx {
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
+ cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ REAL(pthread_cond_timedwait), cond, m, abstime,
+ (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
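init_cond above lazily allocates a side object and publishes it with a single compare-exchange on the first word of the user's pthread_cond_t; the losing thread frees its copy. A standalone sketch of that publish-or-discard pattern using std::atomic (the runtime uses its own atomics plus WRAP(malloc)/WRAP(free) instead):

  #include <atomic>
  #include <cstdint>
  #include <cstdlib>

  struct Aux { /* whatever needs to live beside the user object */ };

  // Returns the per-object Aux, creating it on first use. Safe to call
  // concurrently: exactly one allocation wins, the others are freed.
  static Aux *GetAux(std::atomic<std::uintptr_t> *slot) {
    std::uintptr_t cur = slot->load(std::memory_order_acquire);
    if (cur != 0)
      return reinterpret_cast<Aux *>(cur);
    Aux *fresh = static_cast<Aux *>(std::calloc(1, sizeof(Aux)));
    std::uintptr_t expected = 0;
    if (slot->compare_exchange_strong(expected,
                                      reinterpret_cast<std::uintptr_t>(fresh),
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire))
      return fresh;                 // we published our copy
    std::free(fresh);               // somebody else won the race
    return reinterpret_cast<Aux *>(expected);
  }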
TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
@@ -952,7 +1064,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
if (res == EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
return res;
}
@@ -996,7 +1108,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1039,7 +1151,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1066,7 +1178,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1087,23 +1199,6 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
- MemoryWrite(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_destroy)(c);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m,
- void *abstime) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
- MutexUnlock(thr, pc, (uptr)m);
- MemoryRead(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_timedwait)(c, m, abstime);
- MutexLock(thr, pc, (uptr)m);
- return res;
-}
-
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
@@ -1132,19 +1227,13 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
- // Using SCOPED_INTERCEPTOR_RAW, because if we are called from an ignored lib,
- // the user callback must be executed with thr->in_rtl == 0.
if (o == 0 || f == 0)
return EINVAL;
atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
memory_order_relaxed)) {
- const int old_in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
(*f)();
- CHECK_EQ(thr->in_rtl, 0);
- thr->in_rtl = old_in_rtl;
if (!thr->in_ignored_lib)
Release(thr, pc, (uptr)o);
atomic_store(a, 2, memory_order_release);
@@ -1509,10 +1598,9 @@ TSAN_INTERCEPTOR(int, unlink, char *path) {
return res;
}
-TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
- SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
- void *res = REAL(fopen)(path, mode);
- Acquire(thr, pc, File2addr(path));
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1521,15 +1609,9 @@ TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
return res;
}
-TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
- SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- void *res = REAL(freopen)(path, mode, stream);
- Acquire(thr, pc, File2addr(path));
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1538,19 +1620,6 @@ TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
return res;
}
-TSAN_INTERCEPTOR(int, fclose, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fclose, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- }
- return REAL(fclose)(stream);
-}
-
TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
// libc file streams can call user-supplied functions, see fopencookie.
{
@@ -1569,14 +1638,6 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
return REAL(fwrite)(p, size, nmemb, f);
}
-TSAN_INTERCEPTOR(int, fflush, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fflush, stream);
- }
- return REAL(fflush)(stream);
-}
-
TSAN_INTERCEPTOR(void, abort, int fake) {
SCOPED_TSAN_INTERCEPTOR(abort, fake);
REAL(fflush)(0);
@@ -1626,30 +1687,106 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
return res;
}
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool sigact,
+ int sig, my_siginfo_t *info, void *uctx) {
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 99;
+ // Need to remember pc before the call, because the handler can reset it.
+ uptr pc = sigact ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ pc += 1; // return address is expected, OutputReport() will undo this
+ if (sigact)
+ sigactions[sig].sa_sigaction(sig, info, uctx);
+ else
+ sigactions[sig].sa_handler(sig);
+  // We do not detect errno spoiling for SIGTERM,
+  // because some SIGTERM handlers do spoil errno but reraise SIGTERM;
+  // tsan would report a false positive in such a case.
+  // It is difficult to detect this situation (a reraise) properly,
+  // because in the async signal processing case (when the handler is called
+  // directly from rtl_generic_sighandler) we have not yet received the
+  // reraised signal, and it looks too fragile to intercept all ways to
+  // reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ __tsan::StackTrace stack;
+ stack.ObtainCurrent(thr, pc);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, rep, stack)) {
+ rep.AddStack(&stack);
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ }
+ }
+ errno = saved_errno;
+}
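The check above relies on a sentinel: errno is set to a value no libc call produces (99) before the user handler runs, and any other value afterwards means the handler clobbered errno without restoring it. A minimal standalone sketch of the same idea, outside of tsan (the handler name and the sentinel are illustrative):

#include <cerrno>
#include <csignal>
#include <cstdio>

// Hypothetical user handler that forgets to save and restore errno.
static void sloppy_handler(int) { errno = EINTR; }

// Invoke a handler and report if it spoiled errno, using the same
// sentinel idea as CallUserSignalHandler.
static void call_and_check(void (*handler)(int), int sig) {
  const int saved_errno = errno;
  errno = 99;  // sentinel: no well-behaved handler leaves this value behind
  handler(sig);
  if (errno != 99)
    std::fprintf(stderr, "handler for signal %d spoils errno\n", sig);
  errno = saved_errno;  // the checker itself must not leak the sentinel
}

int main() { call_and_check(sloppy_handler, SIGUSR1); }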
+
+void ProcessPendingSignals(ThreadState *thr) {
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
+ return;
+ thr->in_signal_handler = true;
+ sctx->pending_signal_count = 0;
+ // These are too big for stack.
+ static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+ REAL(sigfillset)(&emptyset);
+ pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ if (sigactions[sig].sa_handler != SIG_DFL
+ && sigactions[sig].sa_handler != SIG_IGN) {
+ CallUserSignalHandler(thr, false, signal->sigaction,
+ sig, &signal->siginfo, &signal->ctx);
+ }
+ }
+ }
+ pthread_sigmask(SIG_SETMASK, &oldset, 0);
+ CHECK_EQ(thr->in_signal_handler, true);
+ thr->in_signal_handler = false;
+}
+
+} // namespace __tsan
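ProcessPendingSignals above blocks every signal (sigfillset plus pthread_sigmask) while it replays the queued handlers, so a fresh asynchronous delivery cannot re-enter the loop. The masking pattern on its own, stripped of tsan's bookkeeping (a sketch, not tsan code):

#include <csignal>
#include <pthread.h>

// Run fn() with all signals blocked on the calling thread and restore
// the previous mask afterwards.
static void with_all_signals_blocked(void (*fn)()) {
  sigset_t all, old;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &old);
  fn();
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}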
+
+static bool is_sync_signal(SignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
+
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
my_siginfo_t *info, void *ctx) {
ThreadState *thr = cur_thread();
SignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
// Don't mess with synchronous signals.
- if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send) ||
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
- int in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
+ (sctx && sctx->in_blocking_func == 1)) {
CHECK_EQ(thr->in_signal_handler, false);
thr->in_signal_handler = true;
- if (sigact)
- sigactions[sig].sa_sigaction(sig, info, ctx);
- else
- sigactions[sig].sa_handler(sig);
+ if (sctx && sctx->in_blocking_func == 1) {
+      // We ignore interceptors in blocking functions;
+      // temporarily enable them again while we are calling the user function.
+ int const i = thr->ignore_interceptors;
+ thr->ignore_interceptors = 0;
+ CallUserSignalHandler(thr, sync, sigact, sig, info, ctx);
+ thr->ignore_interceptors = i;
+ } else {
+ CallUserSignalHandler(thr, sync, sigact, sig, info, ctx);
+ }
CHECK_EQ(thr->in_signal_handler, true);
thr->in_signal_handler = false;
- thr->in_rtl = in_rtl;
return;
}
@@ -1769,11 +1906,7 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// and can report false race between malloc and free
// inside of getaddrinfo. So ignore memory accesses.
ThreadIgnoreBegin(thr, pc);
- // getaddrinfo calls fopen, which can be intercepted by user.
- thr->in_rtl--;
- CHECK_EQ(thr->in_rtl, 0);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- thr->in_rtl++;
ThreadIgnoreEnd(thr, pc);
return res;
}
@@ -1784,8 +1917,7 @@ static void MlockIsUnsupported() {
static atomic_uint8_t printed;
if (atomic_exchange(&printed, 1, memory_order_relaxed))
return;
- if (flags()->verbosity > 0)
- Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
+ VPrintf(1, "INFO: ThreadSanitizer ignores mlock/munlock[all]\n");
}
TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
@@ -1809,17 +1941,42 @@ TSAN_INTERCEPTOR(int, munlockall, void) {
}
TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (cur_thread()->in_symbolizer)
+ return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
int pid = REAL(fork)(fake);
if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
return pid;
}
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+ // Some programs (e.g. openjdk) call close for all file descriptors
+ // in the child process. Under tsan it leads to false positives, because
+ // address space is shared, so the parent process also thinks that
+ // the descriptors are closed (while they are actually not).
+ // This leads to false positives due to missed synchronization.
+  // Strictly speaking, this is undefined behavior, because the vfork child is
+  // not allowed to call any functions other than exec/exit. But this is what
+  // openjdk does, so we want to handle it.
+  // We could disable interceptors in the child process. But it's not possible
+  // to simply intercept and wrap vfork, because the vfork child is not allowed
+ // to return from the function that calls vfork, and that's exactly what
+ // we would do. So this would require some assembly trickery as well.
+ // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
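A wrapper that called REAL(vfork) and then returned would itself break the rule the comment describes, because returning from the wrapper frame in the child corrupts the stack the suspended parent is about to reuse. For context, the problematic user pattern looks roughly like this (an illustration of an openjdk-style spawner, not tsan code):

#include <sys/types.h>
#include <unistd.h>

// Close inherited descriptors in the vfork child, then exec. The kernel
// keeps the parent's descriptors open (fd tables are not shared), but any
// user-space bookkeeping lives in the shared address space, so a tool like
// tsan would see them as closed in the parent as well; hence vfork is
// treated as fork.
static int spawn(const char *path, char *const argv[]) {
  pid_t pid = vfork();
  if (pid == 0) {
    for (int fd = 3; fd < 256; fd++)
      close(fd);
    execv(path, argv);
    _exit(127);
  }
  return pid;
}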
+
static int OnExit(ThreadState *thr) {
int status = Finalize(thr);
REAL(fflush)(0);
@@ -1841,19 +1998,10 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
}
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Causes interceptor recursion (getpwuid_r() calls fopen())
-#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
-#undef SANITIZER_INTERCEPT_GETNAMEINFO
-// Causes interceptor recursion (glob64() calls lstat64())
-#undef SANITIZER_INTERCEPT_GLOB
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -1871,6 +2019,19 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ctx = (void *)&_ctx; \
(void) ctx;
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
@@ -1887,7 +2048,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
@@ -1914,6 +2075,8 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
ScopedSyscall scoped_syscall(thr) \
/**/
@@ -1922,15 +2085,11 @@ struct ScopedSyscall {
explicit ScopedSyscall(ThreadState *thr)
: thr(thr) {
- if (thr->in_rtl == 0)
- Initialize(thr);
- thr->in_rtl++;
+ Initialize(thr);
}
~ScopedSyscall() {
- thr->in_rtl--;
- if (thr->in_rtl == 0)
- ProcessPendingSignals(thr);
+ ProcessPendingSignals(thr);
}
};
@@ -1942,12 +2101,12 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
static void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
- Printf("syscall_acquire(%p)\n", addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
}
static void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
- Printf("syscall_release(%p)\n", addr);
+ DPrintf("syscall_release(%p)\n", addr);
Release(thr, pc, addr);
}
@@ -1959,26 +2118,32 @@ static void syscall_fd_close(uptr pc, int fd) {
static USED void syscall_fd_acquire(uptr pc, int fd) {
TSAN_SYSCALL();
FdAcquire(thr, pc, fd);
- Printf("syscall_fd_acquire(%p)\n", fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
}
static USED void syscall_fd_release(uptr pc, int fd) {
TSAN_SYSCALL();
- Printf("syscall_fd_release(%p)\n", fd);
+ DPrintf("syscall_fd_release(%p)\n", fd);
FdRelease(thr, pc, fd);
}
static void syscall_pre_fork(uptr pc) {
TSAN_SYSCALL();
+ ForkBefore(thr, pc);
}
-static void syscall_post_fork(uptr pc, int res) {
+static void syscall_post_fork(uptr pc, int pid) {
TSAN_SYSCALL();
- if (res == 0) {
+ if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
- } else if (res > 0) {
+ } else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
}
@@ -2027,68 +2192,21 @@ static void finalize(void *arg) {
uptr pc = 0;
atexit_ctx->exit(thr, pc);
int status = Finalize(thr);
- REAL(fflush)(0);
+ // Make sure the output is not lost.
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
if (status)
REAL(_exit)(status);
}
-void ProcessPendingSignals(ThreadState *thr) {
- CHECK_EQ(thr->in_rtl, 0);
- SignalContext *sctx = SigCtx(thr);
- if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
- return;
- Context *ctx = CTX();
- thr->in_signal_handler = true;
- sctx->pending_signal_count = 0;
- // These are too big for stack.
- static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
- REAL(sigfillset)(&emptyset);
- pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
- for (int sig = 0; sig < kSigCount; sig++) {
- SignalDesc *signal = &sctx->pending_signals[sig];
- if (signal->armed) {
- signal->armed = false;
- if (sigactions[sig].sa_handler != SIG_DFL
- && sigactions[sig].sa_handler != SIG_IGN) {
- // Insure that the handler does not spoil errno.
- const int saved_errno = errno;
- errno = 0;
- if (signal->sigaction)
- sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
- else
- sigactions[sig].sa_handler(sig);
- if (flags()->report_bugs && errno != 0) {
- ScopedInRtl in_rtl;
- __tsan::StackTrace stack;
- uptr pc = signal->sigaction ?
- (uptr)sigactions[sig].sa_sigaction :
- (uptr)sigactions[sig].sa_handler;
- pc += 1; // return address is expected, OutputReport() will undo this
- stack.Init(&pc, 1);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
- }
- }
- errno = saved_errno;
- }
- }
- }
- pthread_sigmask(SIG_SETMASK, &oldset, 0);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
-}
-
static void unreachable() {
- Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
}
void InitializeInterceptors() {
- CHECK_GT(cur_thread()->in_rtl, 0);
-
// We need to setup it early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
@@ -2098,7 +2216,7 @@ void InitializeInterceptors() {
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
// We can not use TSAN_INTERCEPT to get setjmp addr,
// because it does &setjmp and setjmp is not present in some versions of libc.
@@ -2128,8 +2246,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(strlen);
TSAN_INTERCEPT(memset);
TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memchr);
- TSAN_INTERCEPT(memrchr);
TSAN_INTERCEPT(memmove);
TSAN_INTERCEPT(memcmp);
TSAN_INTERCEPT(strchr);
@@ -2144,6 +2260,13 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_join);
TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
@@ -2165,9 +2288,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
TSAN_INTERCEPT(pthread_rwlock_unlock);
- INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
- INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
-
TSAN_INTERCEPT(pthread_barrier_init);
TSAN_INTERCEPT(pthread_barrier_destroy);
TSAN_INTERCEPT(pthread_barrier_wait);
@@ -2223,12 +2343,10 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(recv);
TSAN_INTERCEPT(unlink);
- TSAN_INTERCEPT(fopen);
- TSAN_INTERCEPT(freopen);
- TSAN_INTERCEPT(fclose);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_INTERCEPT(tmpfile64);
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
- TSAN_INTERCEPT(fflush);
TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
@@ -2255,6 +2373,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(munlockall);
TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(vfork);
TSAN_INTERCEPT(dlopen);
TSAN_INTERCEPT(dlclose);
TSAN_INTERCEPT(on_exit);
@@ -2280,16 +2399,19 @@ void InitializeInterceptors() {
FdInit();
}
-void internal_start_thread(void(*func)(void *arg), void *arg) {
- // Start the thread with signals blocked, otherwise it can steal users
- // signals.
- __sanitizer_kernel_sigset_t set, old;
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
internal_sigfillset(&set);
internal_sigprocmask(SIG_SETMASK, &set, &old);
void *th;
REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
- REAL(pthread_detach)(th);
internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ REAL(pthread_join)(th, 0);
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc
index 38224f429cd0..20e3d9a35ac7 100644
--- a/libsanitizer/tsan/tsan_interface_ann.cc
+++ b/libsanitizer/tsan/tsan_interface_ann.cc
@@ -31,22 +31,16 @@ class ScopedAnnotation {
public:
ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
uptr pc)
- : thr_(thr)
- , in_rtl_(thr->in_rtl) {
- CHECK_EQ(thr_->in_rtl, 0);
+ : thr_(thr) {
FuncEntry(thr_, pc);
- thr_->in_rtl++;
DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
}
~ScopedAnnotation() {
- thr_->in_rtl--;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
FuncExit(thr_);
}
private:
ThreadState *const thr_;
- const int in_rtl_;
};
#define SCOPED_ANNOTATION(typ) \
@@ -56,7 +50,7 @@ class ScopedAnnotation {
const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __FUNCTION__, f, l, caller_pc); \
+ ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -309,7 +303,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
ExpectRace *race = dyn_ann_ctx->expect.next;
if (race->hitcount == 0) {
- CTX()->nmissed_expected++;
+ ctx->nmissed_expected++;
ReportMissedExpectedRace(race);
}
race->prev->next = race->next;
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 180d87b79934..3f5a4ccc9f71 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
-// For background see C++11 standard. A slightly older, publically
+// For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
@@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interface_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
@@ -28,42 +28,52 @@ using namespace __tsan; // NOLINT
#define SCOPED_ATOMIC(func, ...) \
const uptr callpc = (uptr)__builtin_return_address(0); \
uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- mo = ConvertOrder(mo); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
-// Some shortcuts.
-typedef __tsan_memory_order morder;
-typedef __tsan_atomic8 a8;
-typedef __tsan_atomic16 a16;
-typedef __tsan_atomic32 a32;
-typedef __tsan_atomic64 a64;
-typedef __tsan_atomic128 a128;
-const morder mo_relaxed = __tsan_memory_order_relaxed;
-const morder mo_consume = __tsan_memory_order_consume;
-const morder mo_acquire = __tsan_memory_order_acquire;
-const morder mo_release = __tsan_memory_order_release;
-const morder mo_acq_rel = __tsan_memory_order_acq_rel;
-const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
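mutex128 above exists because, without __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16, the target has no native 128-bit compare-and-swap, so the a128 helpers further down in this file simply take the spin lock and perform a plain read-modify-write. The same fallback in generic form, using std::mutex only to keep the sketch self-contained (assumes a target with __int128):

#include <mutex>

__extension__ typedef __int128 i128;

// One global lock serializes all emulated 128-bit "atomic" operations,
// mirroring the mutex128-guarded helpers in this file.
static std::mutex g_lock128;

static i128 locked_fetch_add(volatile i128 *v, i128 op) {
  std::lock_guard<std::mutex> lock(g_lock128);
  i128 old = *v;
  *v = old + op;
  return old;
}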
class ScopedAtomic {
public:
ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
morder mo, const char *func)
: thr_(thr) {
- CHECK_EQ(thr_->in_rtl, 0);
- ProcessPendingSignals(thr);
FuncEntry(thr_, pc);
DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- thr_->in_rtl++;
}
~ScopedAtomic() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
+ ProcessPendingSignals(thr_);
FuncExit(thr_);
}
private:
@@ -108,27 +118,6 @@ static bool IsAcqRelOrder(morder mo) {
return mo == mo_acq_rel || mo == mo_seq_cst;
}
-static morder ConvertOrder(morder mo) {
- if (mo > (morder)100500) {
- mo = morder(mo - 100500);
- if (mo == morder(1 << 0))
- mo = mo_relaxed;
- else if (mo == morder(1 << 1))
- mo = mo_consume;
- else if (mo == morder(1 << 2))
- mo = mo_acquire;
- else if (mo == morder(1 << 3))
- mo = mo_release;
- else if (mo == morder(1 << 4))
- mo = mo_acq_rel;
- else if (mo == morder(1 << 5))
- mo = mo_seq_cst;
- }
- CHECK_GE(mo, mo_relaxed);
- CHECK_LE(mo, mo_seq_cst);
- return mo;
-}
-
template<typename T> T func_xchg(volatile T *v, T op) {
T res = __sync_lock_test_and_set(v, op);
// __sync_lock_test_and_set does not contain full barrier.
@@ -178,48 +167,56 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = op;
return cmp;
}
a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp + op;
return cmp;
}
a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp - op;
return cmp;
}
a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp & op;
return cmp;
}
a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp | op;
return cmp;
}
a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp ^ op;
return cmp;
}
a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = ~(cmp & op);
return cmp;
}
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
a128 cur = *v;
if (cur == cmp)
*v = xch;
@@ -241,26 +238,74 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return (atomic_uint8_t*)a;
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return (atomic_uint16_t*)a;
+}
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return (atomic_uint32_t*)a;
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return (atomic_uint64_t*)a;
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ if (!IsAcquireOrder(mo)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a; // as if atomic
+ return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
AcquireImpl(thr, pc, &s->clock);
- T v = *a;
+ T v = NoTsanAtomicLoad(a, mo);
s->mtx.ReadUnlock();
- __sync_synchronize();
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+
+template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
@@ -269,21 +314,18 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
// so must reset the clock.
- if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v; // as if atomic
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
return;
}
__sync_synchronize();
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
ReleaseImpl(thr, pc, &s->clock);
- *a = v;
+ NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
- // Trainling memory barrier to provide sequential consistency
- // for Dekker-like store-load synchronization.
- __sync_synchronize();
}
template<typename T, T (*F)(volatile T *v, T op)>
@@ -291,7 +333,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -309,6 +351,41 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
}
template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
@@ -351,13 +428,34 @@ static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
}
template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+
+template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ return NoTsanAtomicCAS(a, &c, v, mo, fmo);
+}
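The pointer-taking overloads above follow the usual compare-and-swap contract: on failure the current value is written back through c, which is what lets callers retry without reloading by hand. The same contract in a plain std::atomic retry loop (a usage sketch, not tsan code):

#include <atomic>

// Increment the counter only while it is even. On CAS failure the current
// value is written back into 'expected', so each iteration sees fresh state.
static bool increment_if_even(std::atomic<unsigned> *a) {
  unsigned expected = a->load(std::memory_order_relaxed);
  while ((expected & 1u) == 0) {
    if (a->compare_exchange_weak(expected, expected + 1,
                                 std::memory_order_acq_rel,
                                 std::memory_order_relaxed))
      return true;
  }
  return false;
}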
+
+template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -370,8 +468,12 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
}
T cc = *c;
T pr = func_cas(a, cc, v);
- if (s)
- s->mtx.Unlock();
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
if (pr == cc)
return true;
*c = pr;
@@ -385,293 +487,362 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
// FIXME(dvyukov): not implemented.
__sync_synchronize();
}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
char* a = 0;
SCOPED_ATOMIC(Fence, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
+} // extern "C"
diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc
index 70b5e5fcc653..7f451690946e 100644
--- a/libsanitizer/tsan/tsan_interface_java.cc
+++ b/libsanitizer/tsan/tsan_interface_java.cc
@@ -77,13 +77,9 @@ class ScopedJavaFunc {
: thr_(thr) {
Initialize(thr_);
FuncEntry(thr, pc);
- CHECK_EQ(thr_->in_rtl, 0);
- thr_->in_rtl++;
}
~ScopedJavaFunc() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
FuncExit(thr_);
// FIXME(dvyukov): process pending signals.
}
@@ -134,7 +130,7 @@ SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
}
if (s == 0 && create) {
DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
- s = CTX()->synctab.Create(thr, pc, addr);
+ s = ctx->synctab.Create(thr, pc, addr);
s->next = b->head;
b->head = s;
}
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index 832374becf58..bd30cd52e637 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -88,7 +88,6 @@ void AllocatorPrintStats() {
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
return;
- Context *ctx = CTX();
StackTrace stack;
stack.ObtainCurrent(thr, pc);
ThreadRegistryLock l(ctx->thread_registry);
@@ -100,7 +99,6 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
- CHECK_GT(thr->in_rtl, 0);
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return AllocatorReturnNull();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
@@ -108,7 +106,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
return 0;
MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
b->Init(sz, thr->tid, CurrentStackId(thr, pc));
- if (CTX() && CTX()->initialized) {
+ if (ctx && ctx->initialized) {
if (thr->ignore_reads_and_writes == 0)
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
@@ -120,7 +118,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
}
void user_free(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_NE(p, (void*)0);
DPrintf("#%d: free(%p)\n", thr->tid, p);
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
@@ -136,7 +133,7 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
}
b->ListReset();
}
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
+ if (ctx && ctx->initialized) {
if (thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
}
@@ -145,7 +142,6 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- CHECK_GT(thr->in_rtl, 0);
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
@@ -165,7 +161,6 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
}
uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
if (p == 0)
return 0;
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
@@ -182,24 +177,21 @@ MBlock *user_mblock(ThreadState *thr, void *p) {
}
void invoke_malloc_hook(void *ptr, uptr size) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__tsan_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__tsan_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
@@ -210,7 +202,6 @@ void *internal_alloc(MBlockType typ, uptr sz) {
void internal_free(void *p) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
diff --git a/libsanitizer/tsan/tsan_mutex.cc b/libsanitizer/tsan/tsan_mutex.cc
index e7846c53e4af..0c3bb4a67213 100644
--- a/libsanitizer/tsan/tsan_mutex.cc
+++ b/libsanitizer/tsan/tsan_mutex.cc
@@ -31,13 +31,14 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*2 MutexTypeThreads*/ {MutexTypeReport},
/*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
- /*4 MutexTypeSyncVar*/ {},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
/*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
/*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
@@ -121,12 +122,12 @@ void InitializeMutex() {
#endif
}
-DeadlockDetector::DeadlockDetector() {
+InternalDeadlockDetector::InternalDeadlockDetector() {
// Rely on zero initialization because some mutexes can be locked before ctor.
}
#if TSAN_DEBUG && !TSAN_GO
-void DeadlockDetector::Lock(MutexType t) {
+void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
CHECK_LT(t, MutexTypeCount);
@@ -153,7 +154,7 @@ void DeadlockDetector::Lock(MutexType t) {
}
}
-void DeadlockDetector::Unlock(MutexType t) {
+void InternalDeadlockDetector::Unlock(MutexType t) {
// Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
CHECK(locked_[t]);
locked_[t] = 0;
@@ -208,7 +209,7 @@ Mutex::~Mutex() {
void Mutex::Lock() {
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
@@ -233,13 +234,13 @@ void Mutex::Unlock() {
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
@@ -261,7 +262,7 @@ void Mutex::ReadUnlock() {
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h
index 6d1450593301..f075ce831e6e 100644
--- a/libsanitizer/tsan/tsan_mutex.h
+++ b/libsanitizer/tsan/tsan_mutex.h
@@ -29,6 +29,7 @@ enum MutexType {
MutexTypeAtExit,
MutexTypeMBlock,
MutexTypeJavaMBlock,
+ MutexTypeDDetector,
// This must be the last.
MutexTypeCount
@@ -63,9 +64,9 @@ class Mutex {
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
-class DeadlockDetector {
+class InternalDeadlockDetector {
public:
- DeadlockDetector();
+ InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
private:
diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h
index df36b462c60a..7fdc194109fa 100644
--- a/libsanitizer/tsan/tsan_mutexset.h
+++ b/libsanitizer/tsan/tsan_mutexset.h
@@ -36,6 +36,10 @@ class MutexSet {
uptr Size() const;
Desc Get(uptr i) const;
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
private:
#ifndef TSAN_GO
uptr size_;
@@ -43,6 +47,7 @@ class MutexSet {
#endif
void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
};
// Go does not have mutexes, so do not spend memory and time.
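The pair added above is a deliberate design choice: assignment is allowed and implemented as a raw internal_memcpy of the POD contents, while copy construction is forbidden by declaring the copy constructor private and never defining it, the usual pre-C++11 idiom. The pattern in isolation (a sketch, not the real MutexSet):

#include <cstring>

class PodSet {
 public:
  PodSet() : size_(0) {}
  // Assignment is a plain byte copy; the class holds only POD data.
  void operator=(const PodSet &other) {
    std::memcpy(this, &other, sizeof(*this));
  }

 private:
  unsigned size_;
  PodSet(const PodSet &);  // declared but never defined: copying is rejected
};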
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index 164ee45d6c6b..60eb1a849954 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -75,6 +75,8 @@ static const uptr kLinuxShadowMsk = 0x200000000000ULL;
#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+static const uptr kAppMemGapBeg = 0x2c0000000000ULL;
+static const uptr kAppMemGapEnd = 0x7d0000000000ULL;
#else
static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
@@ -103,7 +105,12 @@ static const uptr kLinuxShadowEnd =
MemToShadow(kLinuxAppMemEnd) | 0xff;
static inline bool IsAppMem(uptr mem) {
+#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+ return (mem >= kLinuxAppMemBeg && mem < kAppMemGapBeg) ||
+ (mem >= kAppMemGapEnd && mem <= kLinuxAppMemEnd);
+#else
return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+#endif
}
static inline bool IsShadowMem(uptr mem) {
@@ -138,8 +145,9 @@ const char *InitializePlatform();
void FinalizePlatform();
// The additional page is to catch shadow stack overflow as paging fault.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + 4096
- + 4095) & ~4095;
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
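The expression is the standard round-up idiom: the payload plus an extra 64 KiB region (the overflow-catching page mentioned above) is padded with (64K - 1) and the low bits are masked off, so the total is a multiple of 64 KiB. A tiny self-contained check of just the rounding part (the sizes are made up):

#include <cstddef>

// Round n up to a multiple of 64 KiB, the same idiom as kTotalTraceSize.
constexpr std::size_t RoundUpTo64K(std::size_t n) {
  return (n + (64 << 10) - 1) & ~static_cast<std::size_t>((64 << 10) - 1);
}

static_assert(RoundUpTo64K(100000) == 131072, "rounds up to the next 64K");
static_assert(RoundUpTo64K(64 << 10) == (64 << 10), "aligned values stay put");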
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
@@ -154,7 +162,8 @@ uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
return p;
}
-void internal_start_thread(void(*func)(void*), void *arg);
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
@@ -162,6 +171,10 @@ bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
+
} // namespace __tsan
#else // defined(__LP64__) || defined(_WIN64)
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index fe69430b711c..062e84615cc7 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -59,27 +59,6 @@ namespace __tsan {
const uptr kPageSize = 4096;
-#ifndef TSAN_GO
-ScopedInRtl::ScopedInRtl()
- : thr_(cur_thread()) {
- in_rtl_ = thr_->in_rtl;
- thr_->in_rtl++;
- errno_ = errno;
-}
-
-ScopedInRtl::~ScopedInRtl() {
- thr_->in_rtl--;
- errno = errno_;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
-}
-#else
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
-#endif
-
void FillProfileCallback(uptr start, uptr rss, bool file,
uptr *mem, uptr stats_size) {
CHECK_EQ(7, stats_size);
@@ -133,7 +112,6 @@ void FlushShadowMemory() {
#ifndef TSAN_GO
static void ProtectRange(uptr beg, uptr end) {
- ScopedInRtl in_rtl;
CHECK_LE(beg, end);
if (beg == end)
return;
@@ -159,17 +137,19 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char filename[256];
- internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
+ internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
// Fill the file with kShadowRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
InternalScopedBuffer<u64> marker(kMarkerSize);
- for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ // volatile to prevent insertion of memset
+ for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
*p = kShadowRodata;
internal_write(fd, marker.data(), marker.size());
// Map the file into memory.
@@ -177,13 +157,12 @@ static void MapRodata() {
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
if (internal_iserror(page)) {
internal_close(fd);
- internal_unlink(filename);
return;
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
- char name[128];
+ // Reusing the buffer 'name'.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
if (name[0] != 0 && name[0] != '['
&& (prot & MemoryMappingLayout::kProtectionRead)
@@ -200,7 +179,6 @@ static void MapRodata() {
}
}
internal_close(fd);
- internal_unlink(filename);
}
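Note the unlink right after the open above: once the name is removed, the descriptor alone keeps the data alive, so the scratch file cannot be leaked on any early-return path and the name buffer becomes free for reuse. The same idiom in isolation (an illustrative sketch, not part of MapRodata):

#include <stdlib.h>
#include <unistd.h>

// Create a scratch file and immediately unlink its name. The returned
// descriptor keeps the inode alive; nothing is left on disk no matter how
// the caller exits. (The template path is illustrative.)
static int OpenScratchFile() {
  char name[] = "/tmp/scratch.XXXXXX";
  int fd = mkstemp(name);
  if (fd < 0)
    return -1;
  unlink(name);
  return fd;
}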
void InitializeShadowMemory() {
@@ -314,10 +292,10 @@ const char *InitializePlatform() {
// we re-exec the program with limited stack size as a best effort.
if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
const uptr kMaxStackSize = 32 * 1024 * 1024;
- Report("WARNING: Program is run with unlimited stack size, which "
- "wouldn't work with ThreadSanitizer.\n");
- Report("Re-execing with stack size limited to %zd bytes.\n",
- kMaxStackSize);
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
reexec = true;
}
@@ -378,8 +356,19 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
}
return res;
}
-#endif
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are a hardcore macro mess;
+  // we can't intercept or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
} // namespace __tsan
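pthread_cleanup_push and pthread_cleanup_pop typically expand to a matched pair of braces and must appear as a lexical pair in the same block, which is why the helper above wraps them around the callback instead of trying to intercept them. A minimal usage sketch of the pair itself, unrelated to tsan's internals:

#include <pthread.h>

static void unlock_on_cancel(void *arg) {
  pthread_mutex_unlock(static_cast<pthread_mutex_t *>(arg));
}

static void *worker(void *arg) {
  pthread_mutex_t *mu = static_cast<pthread_mutex_t *>(arg);
  pthread_mutex_lock(mu);
  pthread_cleanup_push(unlock_on_cancel, mu);  // runs if the thread is cancelled
  // ... a cancellation point (e.g. a condition wait) would go here ...
  pthread_cleanup_pop(1);  // pop the handler and run it now
  return nullptr;
}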
diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc
index 3dca611dc928..c3d4d905219c 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cc
+++ b/libsanitizer/tsan/tsan_platform_mac.cc
@@ -38,17 +38,18 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
+uptr GetShadowMemoryConsumption() {
+ return 0;
}
-ScopedInRtl::~ScopedInRtl() {
+void FlushShadowMemory() {
}
-uptr GetShadowMemoryConsumption() {
- return 0;
+void WriteMemoryProfile(char *buf, uptr buf_size) {
}
-void FlushShadowMemory() {
+uptr GetRSS() {
+ return 0;
}
#ifndef TSAN_GO
@@ -88,6 +89,20 @@ void FinalizePlatform() {
fflush(0);
}
+#ifndef TSAN_GO
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are a hardcore macro mess;
+  // we can't intercept or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
+
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc
index 6e49ef42f0c0..f16bebf7f4c5 100644
--- a/libsanitizer/tsan/tsan_platform_windows.cc
+++ b/libsanitizer/tsan/tsan_platform_windows.cc
@@ -19,17 +19,18 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
+uptr GetShadowMemoryConsumption() {
+ return 0;
}
-ScopedInRtl::~ScopedInRtl() {
+void FlushShadowMemory() {
}
-uptr GetShadowMemoryConsumption() {
- return 0;
+void WriteMemoryProfile(char *buf, uptr buf_size) {
}
-void FlushShadowMemory() {
+uptr GetRSS() {
+ return 0;
}
const char *InitializePlatform() {
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index f2484166e819..00a512e0cf86 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -38,6 +38,7 @@ ReportDesc::ReportDesc()
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
, threads(MBlockReportThread)
+ , unique_tids(MBlockReportThread)
, sleep()
, count() {
}
@@ -71,10 +72,20 @@ static const char *ReportTypeString(ReportType typ) {
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
return "destroy of a locked mutex";
+ if (typ == ReportTypeMutexDoubleLock)
+ return "double lock of a mutex";
+ if (typ == ReportTypeMutexBadUnlock)
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ if (typ == ReportTypeMutexBadReadLock)
+ return "read lock of a write locked mutex";
+ if (typ == ReportTypeMutexBadReadUnlock)
+ return "read unlock of a write locked mutex";
if (typ == ReportTypeSignalUnsafe)
return "signal-unsafe call inside of a signal";
if (typ == ReportTypeErrnoInSignal)
return "signal handler spoils errno";
+ if (typ == ReportTypeDeadlock)
+ return "lock-order-inversion (potential deadlock)";
return "";
}
@@ -153,6 +164,17 @@ static void PrintLocation(const ReportLocation *loc) {
PrintStack(loc->stack);
}
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+}
+
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
if (rm->destroyed) {
@@ -161,7 +183,7 @@ static void PrintMutex(const ReportMutex *rm) {
Printf("%s", d.EndMutex());
} else {
Printf("%s", d.Mutex());
- Printf(" Mutex M%llu created at:\n", rm->id);
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
Printf("%s", d.EndMutex());
PrintStack(rm->stack);
}
@@ -221,10 +243,42 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.EndWarning());
- for (uptr i = 0; i < rep->stacks.Size(); i++) {
- if (i)
- Printf(" and:\n");
- PrintStack(rep->stacks[i]);
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.EndThreadDescription());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
}
for (uptr i = 0; i < rep->mops.Size(); i++)
@@ -236,8 +290,10 @@ void PrintReport(const ReportDesc *rep) {
for (uptr i = 0; i < rep->locs.Size(); i++)
PrintLocation(rep->locs[i]);
- for (uptr i = 0; i < rep->mutexes.Size(); i++)
- PrintMutex(rep->mutexes[i]);
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
@@ -289,11 +345,26 @@ static void PrintThread(const ReportThread *rt) {
void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
- Printf("WARNING: DATA RACE");
- for (uptr i = 0; i < rep->mops.Size(); i++)
- PrintMop(rep->mops[i], i == 0);
- for (uptr i = 0; i < rep->threads.Size(); i++)
- PrintThread(rep->threads[i]);
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
Printf("==================\n");
}
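
Note: the deadlock branch above prints the cycle edge by edge: for n mutexes in the cycle, edge i reports that M[(i+1) % n] was acquired while M[i] was held, with one stack per edge (or two when second_deadlock_stack=1). A small self-contained sketch of that traversal, with made-up mutex ids:

    #include <cstdio>

    int main() {
      const int ids[] = {1, 2, 3};                // hypothetical mutex ids forming the cycle
      const int n = sizeof(ids) / sizeof(ids[0]);
      for (int i = 0; i < n; i++)
        std::printf("  Mutex M%d acquired here while holding mutex M%d\n",
                    ids[(i + 1) % n], ids[i]);
      return 0;
    }
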
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index c0eef9eb023b..d773e057af0a 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -22,8 +22,13 @@ enum ReportType {
ReportTypeUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
- ReportTypeErrnoInSignal
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
};
struct ReportStack {
@@ -87,6 +92,7 @@ struct ReportThread {
struct ReportMutex {
u64 id;
+ uptr addr;
bool destroyed;
ReportStack *stack;
};
@@ -99,6 +105,7 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
ReportStack *sleep;
int count;
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index 573eeb8a9188..7932a6d9e042 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -35,22 +35,21 @@ namespace __tsan {
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
// Can be overriden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
+void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
return failed;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void WEAK OnInitialize() {}
#endif
-static Context *ctx;
-Context *CTX() {
- return ctx;
-}
-
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
@@ -74,7 +73,7 @@ Context::Context()
, nreported()
, nmissed_expected()
, thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize))
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions(8) {
@@ -82,13 +81,15 @@ Context::Context()
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
: fast_state(tid, epoch)
// Do not touch these, rely on zero initialization,
// they may be accessed before the ctor.
// , ignore_reads_and_writes()
- // , in_rtl()
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
#ifndef TSAN_GO
, jmp_bufs(MBlockJmpBuf)
#endif
@@ -97,7 +98,11 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_addr(stk_addr)
, stk_size(stk_size)
, tls_addr(tls_addr)
- , tls_size(tls_size) {
+ , tls_size(tls_size)
+#ifndef TSAN_GO
+ , last_sleep_clock(tid)
+#endif
+{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
@@ -113,8 +118,13 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
}
static void BackgroundThread(void *arg) {
- ScopedInRtl in_rtl;
- Context *ctx = CTX();
+#ifndef TSAN_GO
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread()->ignore_interceptors++;
+#endif
const u64 kMs2Ns = 1000 * 1000;
fd_t mprof_fd = kInvalidFd;
@@ -133,8 +143,10 @@ static void BackgroundThread(void *arg) {
u64 last_flush = NanoTime();
uptr last_rss = 0;
- for (int i = 0; ; i++) {
- SleepForSeconds(1);
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
@@ -185,6 +197,16 @@ static void BackgroundThread(void *arg) {
}
}
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
@@ -192,6 +214,9 @@ void DontNeedShadowFor(uptr addr, uptr size) {
}
void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
@@ -199,6 +224,7 @@ void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
CHECK_GE(addr, kTraceMemBegin);
CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
@@ -213,11 +239,12 @@ void Initialize(ThreadState *thr) {
if (is_initialized)
return;
is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
- ScopedInRtl in_rtl;
#ifndef TSAN_GO
InitializeAllocator();
#endif
@@ -235,19 +262,13 @@ void Initialize(ThreadState *thr) {
InitializeSuppressions();
#ifndef TSAN_GO
InitializeLibIgnore();
- // Initialize external symbolizer before internal threads are started.
- const char *external_symbolizer = flags()->external_symbolizer_path;
- bool external_symbolizer_started =
- Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
- if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
- !external_symbolizer_started) {
- Printf("Failed to start external symbolizer: '%s'\n",
- external_symbolizer);
- Die();
- }
+ Symbolizer::Init(common_flags()->external_symbolizer_path);
Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
- internal_start_thread(&BackgroundThread, 0);
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
+ if (flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
if (ctx->flags.verbosity)
Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
@@ -257,7 +278,6 @@ void Initialize(ThreadState *thr) {
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(thr, tid, internal_getpid());
- CHECK_EQ(thr->in_rtl, 1);
ctx->initialized = true;
if (flags()->stop_on_start) {
@@ -266,10 +286,11 @@ void Initialize(ThreadState *thr) {
(int)internal_getpid());
while (__tsan_resumed == 0) {}
}
+
+ OnInitialize();
}
int Finalize(ThreadState *thr) {
- ScopedInRtl in_rtl;
Context *ctx = __tsan::ctx;
bool failed = false;
@@ -319,6 +340,38 @@ int Finalize(ThreadState *thr) {
}
#ifndef TSAN_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ internal_start_thread(&BackgroundThread, 0);
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
@@ -332,11 +385,9 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos--;
return id;
}
-#endif
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
- ScopedInRtl in_rtl;
Trace *thr_trace = ThreadTrace(thr->tid);
Lock l(&thr_trace->mtx);
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
@@ -541,17 +592,19 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
FastState fast_state = thr->fast_state;
if (fast_state.GetIgnoreBit())
return;
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ // We must not store to the trace if we do not store to the shadow.
+ // That is, this call must be moved somewhere below.
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ }
+
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
cur.SetAtomic(kIsAtomic);
- // We must not store to the trace if we do not store to the shadow.
- // That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
-
MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
@@ -581,7 +634,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < 64*1024) {
+ if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -631,8 +684,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
thr->is_freeing = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
@@ -642,8 +697,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.SetWrite(true);
@@ -653,11 +710,12 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
@@ -683,11 +741,12 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
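
Note: the repeated if (kCollectHistory) guards above (MemoryAccess, MemoryRangeFreed, MemoryRangeImitateWrite, FuncEntry, FuncExit) are one compile-time-flag idiom: because kCollectHistory is a constant, the compiler can drop the epoch increment and trace write entirely in configurations that do not record history. A reduced sketch of the idiom (names illustrative, not the runtime's actual definitions):

    constexpr bool kCollectHistory = true;  // assumed constant; false in a no-history build

    inline void TraceAddEvent(/* thread state, event type, pc */) {
      if (!kCollectHistory)
        return;                             // whole call folds away when the flag is false
      // ... increment the epoch and append the event to the per-thread trace ...
    }
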
@@ -702,7 +761,8 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#ifndef TSAN_GO
- thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+ if (!ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -723,7 +783,8 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#ifndef TSAN_GO
- thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+ if (!ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -733,7 +794,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_sync, 0);
#ifndef TSAN_GO
if (thr->ignore_sync == 0)
- thr->mop_ignore_set.Reset();
+ thr->sync_ignore_set.Reset();
#endif
}
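
Note: StartBackgroundThread/StopBackgroundThread above replace the old fire-and-forget internal_start_thread call with a stoppable worker: the loop polls ctx->stop_background_thread with relaxed atomics, and the sandboxing callback sets the flag and joins. A standalone sketch of the same shutdown pattern in standard C++ (std::thread/std::atomic stand in for the sanitizer's internal primitives):

    #include <atomic>
    #include <chrono>
    #include <thread>

    std::atomic<int> stop_background_thread{0};

    void BackgroundLoop() {
      while (stop_background_thread.load(std::memory_order_relaxed) == 0) {
        // ... periodic work: flush shadow memory, write the memory profile ...
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
    }

    int main() {
      std::thread bg(BackgroundLoop);                         // StartBackgroundThread()
      // ... program runs ...
      stop_background_thread.store(1, std::memory_order_relaxed);
      bg.join();                                              // StopBackgroundThread()
      return 0;
    }
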
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index 20493ea4d3b9..e6bc8b285955 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -28,6 +28,7 @@
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
@@ -431,11 +432,11 @@ struct ThreadState {
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
#endif
u64 stat[StatCnt];
const int tid;
const int unique_id;
- int in_rtl;
bool in_symbolizer;
bool in_ignored_lib;
bool is_alive;
@@ -447,7 +448,9 @@ struct ThreadState {
const uptr tls_size;
ThreadContext *tctx;
- DeadlockDetector deadlock_detector;
+ InternalDeadlockDetector internal_deadlock_detector;
+ DDPhysicalThread *dd_pt;
+ DDLogicalThread *dd_lt;
bool in_signal_handler;
SignalContext *signal_ctx;
@@ -462,13 +465,13 @@ struct ThreadState {
int nomalloc;
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
-Context *CTX();
-
#ifndef TSAN_GO
+__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
@@ -480,11 +483,7 @@ class ThreadContext : public ThreadContextBase {
explicit ThreadContext(int tid);
~ThreadContext();
ThreadState *thr;
-#ifdef TSAN_GO
- StackTrace creation_stack;
-#else
u32 creation_stack_id;
-#endif
SyncClock sync;
// Epoch at which the thread had started.
// If we see an event from the thread stamped by an older epoch,
@@ -527,6 +526,7 @@ struct Context {
Context();
bool initialized;
+ bool after_multithreaded_fork;
SyncTab synctab;
@@ -535,12 +535,16 @@ struct Context {
int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
ThreadRegistry *thread_registry;
Vector<RacyStacks> racy_stacks;
Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
Flags flags;
@@ -549,14 +553,20 @@ struct Context {
u64 int_alloc_siz[MBlockTypeCount];
};
-class ScopedInRtl {
- public:
- ScopedInRtl();
- ~ScopedInRtl();
- private:
- ThreadState*thr_;
- int in_rtl_;
- int errno_;
+extern Context *ctx; // The one and the only global runtime context.
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#ifndef TSAN_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#ifndef TSAN_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
};
class ScopedReport {
@@ -568,7 +578,10 @@ class ScopedReport {
void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
const MutexSet *mset);
void AddThread(const ThreadContext *tctx);
+ void AddThread(int unique_tid);
+ void AddUniqueTid(int unique_tid);
void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
void AddLocation(uptr addr, uptr size);
void AddSleep(u32 stack_id);
void SetCount(int count);
@@ -576,10 +589,12 @@ class ScopedReport {
const ReportDesc *GetReport() const;
private:
- Context *ctx_;
ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
- void AddMutex(u64 id);
+ void AddDeadMutex(u64 id);
ScopedReport(const ScopedReport&);
void operator = (const ScopedReport&);
@@ -606,10 +621,14 @@ void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
const ScopedReport &srep,
- const ReportStack *suppress_stack1 = 0,
+ const ReportStack *suppress_stack1,
const ReportStack *suppress_stack2 = 0,
const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
@@ -707,9 +726,10 @@ void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
+ bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
@@ -754,6 +774,8 @@ Trace *ThreadTrace(int tid);
extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, 61), addr);
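
Note: ScopedIgnoreInterceptors replaces the old ScopedInRtl guard with a simple RAII counter on the current thread: while the counter is non-zero, interceptors pass straight through to the real functions. A sketch of the idiom with a plain thread_local counter standing in for cur_thread() (illustrative only):

    thread_local int ignore_interceptors = 0;

    struct ScopedIgnoreInterceptors {
      ScopedIgnoreInterceptors() { ignore_interceptors++; }
      ~ScopedIgnoreInterceptors() { ignore_interceptors--; }
    };

    int intercepted_call() {
      if (ignore_interceptors)
        return 0;  // skip the runtime; behave like the plain libc call
      // ... normal interceptor processing ...
      return 0;
    }

    void Symbolize() {
      ScopedIgnoreInterceptors ignore;  // symbolizer calls would otherwise re-enter the runtime
      // ... intercepted calls made here bypass TSan bookkeeping ...
    }
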
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index d9a3a3baa3fe..3724571cfff2 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -9,6 +9,9 @@
//
//===----------------------------------------------------------------------===//
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
@@ -18,10 +21,47 @@
namespace __tsan {
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ virtual u32 Unwind() {
+ return CurrentStackId(thr, pc);
+ }
+ virtual int UniqueTid() {
+ return thr->unique_id;
+ }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ StackTrace trace;
+ trace.ObtainCurrent(thr, pc);
+ rep.AddStack(&trace);
+ rep.AddLocation(addr, 1);
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+}
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
if (!linker_init && IsAppMem(addr)) {
@@ -38,8 +78,6 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
}
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
@@ -51,6 +89,10 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
if (s == 0)
return;
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ }
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
@@ -71,30 +113,30 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
RestoreStack(last.tid(), last.epoch(), &trace, 0);
rep.AddStack(&trace);
rep.AddLocation(s->addr, 1);
- OutputReport(ctx, rep);
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
}
thr->mset.Remove(s->GetId());
DestroyAndFree(s);
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else {
- Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
+ } else if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_double_lock = true;
}
if (s->recursion == 0) {
StatInc(thr, StatMutexLock);
@@ -105,30 +147,36 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
}
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ if (flags()->detect_deadlocks && s->recursion == 1) {
+ Callback cb(thr, pc);
+ if (!try_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
- if (s->recursion == 0) {
- if (!s->is_broken) {
- s->is_broken = true;
- Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
- }
- } else if (s->owner_tid != thr->tid) {
- if (!s->is_broken) {
+ bool report_bad_unlock = false;
+ if (s->recursion == 0 || s->owner_tid != thr->tid) {
+ if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
} else {
rec = all ? s->recursion : 1;
@@ -142,56 +190,96 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
}
}
thr->mset.Del(s->GetId(), true);
+ if (flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_lock = true;
+ }
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ if (flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ if (!trylock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ }
+ u64 mid = s->GetId();
s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_unlock = true;
+ }
}
ReleaseImpl(thr, pc, &s->read_clock);
+ if (flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
- thr->mset.Del(s->GetId(), false);
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
+ bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
// Seems to be read unlock.
write = false;
@@ -214,17 +302,25 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
} else if (!s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
+ if (flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
@@ -233,11 +329,10 @@ void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
}
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -255,17 +350,16 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateClockCallback, thr);
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -274,11 +368,10 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -301,8 +394,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateSleepClockCallback, thr);
}
#endif
@@ -310,7 +403,7 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->clock.acquire(c);
StatInc(thr, StatSyncAcquire);
}
@@ -318,7 +411,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(c);
StatInc(thr, StatSyncRelease);
@@ -327,7 +420,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.ReleaseStore(c);
StatInc(thr, StatSyncRelease);
@@ -336,11 +429,43 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.acq_rel(c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ StackTrace stacks[2 * DDReport::kMaxLoopSize];
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ uptr size;
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk) {
+ const uptr *trace = StackDepotGet(stk, &size);
+ stacks[i].Init(const_cast<uptr *>(trace), size);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ stacks[i].Init(&dummy_pc, 1);
+ }
+ rep.AddStack(&stacks[i]);
+ }
+ }
+ // FIXME: use all stacks for suppressions, not just the second stack of the
+ // first edge.
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+}
+
} // namespace __tsan
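
Note: every mutex hook above follows the same discipline: decide whether to report while s->mtx is held, copy the mutex id, unlock, and only then call ReportMutexMisuse/ReportDeadlock, since reporting locks the thread registry and may symbolize ("Can't touch s after this point"). A reduced sketch of that shape in standard C++ (Sync, OnUnlock and the report function are illustrative names, not the runtime's API):

    #include <cstdint>
    #include <cstdio>
    #include <mutex>

    struct Sync {
      uint64_t id = 0;
      bool is_broken = false;
      std::mutex mtx;
    };

    void ReportMutexMisuse(uint64_t mid) {     // may take other locks, symbolize, etc.
      std::printf("bad unlock of mutex M%llu\n", (unsigned long long)mid);
    }

    void OnUnlock(Sync *s, bool misuse_detected) {
      bool report = false;
      uint64_t mid = 0;
      {
        std::lock_guard<std::mutex> l(s->mtx);
        if (misuse_detected && !s->is_broken) {  // decide while s is protected
          s->is_broken = true;                   // report each broken mutex only once
          report = true;
        }
        mid = s->id;                             // copy everything the report needs
      }                                          // s must not be touched past this point
      if (report)
        ReportMutexMisuse(mid);
    }
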
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index f2248afeab23..d19deb066dff 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -32,7 +32,10 @@ static ReportStack *SymbolizeStack(const StackTrace& trace);
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- ScopedInRtl in_rtl;
+  // There is a high probability that interceptors will check-fail as well;
+  // on the other hand, there is no sense in processing interceptors
+  // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
@@ -99,8 +102,9 @@ static void StackStripMain(ReportStack *stack) {
#endif
}
-#ifndef TSAN_GO
ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
+ return 0;
uptr ssz = 0;
const uptr *stack = StackDepotGet(stack_id, &ssz);
if (stack == 0)
@@ -109,7 +113,6 @@ ReportStack *SymbolizeStackId(u32 stack_id) {
trace.Init(stack, ssz);
return SymbolizeStack(trace);
}
-#endif
static ReportStack *SymbolizeStack(const StackTrace& trace) {
if (trace.IsEmpty())
@@ -143,18 +146,17 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
}
ScopedReport::ScopedReport(ReportType typ) {
- ctx_ = CTX();
- ctx_->thread_registry->CheckLocked();
+ ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
- ctx_->report_mtx.Lock();
+ ctx->report_mtx.Lock();
CommonSanitizerReportMutex.Lock();
}
ScopedReport::~ScopedReport() {
CommonSanitizerReportMutex.Unlock();
- ctx_->report_mtx.Unlock();
+ ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
}
@@ -176,26 +178,16 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->stack = SymbolizeStack(*stack);
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 uid = 0;
- uptr addr = SyncVar::SplitId(d.id, &uid);
- SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- ReportMopMutex mtx = {s->uid, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(s);
- } else {
- ReportMopMutex mtx = {d.id, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(d.id);
- }
- if (s)
- s->mtx.ReadUnlock();
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
}
}
+void ScopedReport::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
void ScopedReport::AddThread(const ThreadContext *tctx) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
@@ -207,19 +199,14 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->id = tctx->tid;
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
- rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+ rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
rt->stack = 0;
-#ifdef TSAN_GO
- rt->stack = SymbolizeStack(tctx->creation_stack);
-#else
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
-#endif
}
#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -232,7 +219,6 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
}
static ThreadContext *FindThreadByTidLocked(int tid) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
return static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -250,7 +236,6 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
@@ -264,6 +249,12 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
+void ScopedReport::AddThread(int unique_tid) {
+#ifndef TSAN_GO
+ AddThread(FindThreadByUidLocked(unique_tid));
+#endif
+}
+
void ScopedReport::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
@@ -273,14 +264,31 @@ void ScopedReport::AddMutex(const SyncVar *s) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
+ rm->addr = s->addr;
rm->destroyed = false;
- rm->stack = 0;
-#ifndef TSAN_GO
rm->stack = SymbolizeStackId(s->creation_stack_id);
-#endif
}
-void ScopedReport::AddMutex(u64 id) {
+u64 ScopedReport::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.ReadUnlock();
+ return mid;
+}
+
+void ScopedReport::AddDeadMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
@@ -289,6 +297,7 @@ void ScopedReport::AddMutex(u64 id) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = id;
+ rm->addr = 0;
rm->destroyed = true;
rm->stack = 0;
}
@@ -369,7 +378,6 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -434,7 +442,6 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
bool equal_stack = false;
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
@@ -474,7 +481,6 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
if (flags()->suppress_equal_stacks) {
RacyStacks hash;
hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
@@ -579,7 +585,7 @@ static bool IsJavaNonsense(const ReportDesc *rep) {
&& frame->module == 0)) {
if (frame) {
FiredSuppression supp = {rep->typ, frame->pc, 0};
- CTX()->fired_suppressions.push_back(supp);
+ ctx->fired_suppressions.push_back(supp);
}
return true;
}
@@ -603,10 +609,12 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}
void ReportRace(ThreadState *thr) {
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
if (!flags()->report_bugs)
return;
- ScopedInRtl in_rtl;
-
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
@@ -631,7 +639,6 @@ void ReportRace(ThreadState *thr) {
return;
}
- Context *ctx = CTX();
ThreadRegistryLock l0(ctx->thread_registry);
ReportType typ = ReportTypeRace;
@@ -706,8 +713,8 @@ void PrintCurrentStackSlow() {
#ifndef TSAN_GO
__sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
- ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
- 0, 0, 0, false);
+ ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
+ 0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
uptr tmp = ptrace->trace[i];
ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
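
Note: the new u64 ScopedReport::AddMutex(u64 id) relies on the mutex id carrying both the address and a unique counter, so a stale id can be distinguished from a live mutex that happens to occupy the same address. A sketch of such an id scheme (the bit split here is a made-up illustration, not SyncVar's actual layout):

    #include <cstdint>

    const unsigned kUidBits = 14;                       // hypothetical split

    uint64_t MakeId(uint64_t addr, uint64_t uid) {
      return (addr << kUidBits) | (uid & ((1ull << kUidBits) - 1));
    }

    uint64_t SplitId(uint64_t id, uint64_t *uid) {
      *uid = id & ((1ull << kUidBits) - 1);             // generation counter
      return id >> kUidBits;                            // address part
    }
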
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc
index dea669835359..385af7e1fa67 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cc
+++ b/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -57,11 +57,7 @@ void ThreadContext::OnCreated(void *arg) {
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
-#ifdef TSAN_GO
- creation_stack.ObtainCurrent(args->thr, args->pc);
-#else
creation_stack_id = CurrentStackId(args->thr, args->pc);
-#endif
if (reuse_count == 0)
StatInc(args->thr, StatThreadMaxTid);
}
@@ -87,8 +83,8 @@ void ThreadContext::OnStarted(void *arg) {
// from different threads.
epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, unique_id,
- epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef TSAN_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
@@ -104,6 +100,10 @@ void ThreadContext::OnStarted(void *arg) {
#ifndef TSAN_GO
AllocatorThreadStart(thr);
#endif
+ if (flags()->detect_deadlocks) {
+ thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ }
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
thr->fast_state.SetHistorySize(flags()->history_size);
@@ -128,11 +128,15 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
+ if (flags()->detect_deadlocks) {
+ ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ }
#ifndef TSAN_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
- StatAggregate(CTX()->stat, thr->stat);
+ StatAggregate(ctx->stat, thr->stat);
thr = 0;
}
@@ -177,6 +181,8 @@ static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
}
static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
if (thr->ignore_reads_and_writes)
ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
if (thr->ignore_sync)
@@ -187,36 +193,31 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
void ThreadFinalize(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
#ifndef TSAN_GO
if (!flags()->report_thread_leaks)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
+ ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks(MBlockScopedBuf);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
rep.AddThread(leaks[i].tctx);
rep.SetCount(leaks[i].count);
- OutputReport(CTX(), rep);
+ OutputReport(ctx, rep, rep.GetReport()->threads[0]->stack);
}
#endif
}
int ThreadCount(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
uptr result;
ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
return (int)result;
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- CHECK_GT(thr->in_rtl, 0);
StatInc(thr, StatThreadCreate);
- Context *ctx = CTX();
OnCreatedArgs args = { thr, pc };
int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
@@ -225,8 +226,6 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
}
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -259,10 +258,17 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
tr->Lock();
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
+
+#ifndef TSAN_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
}
void ThreadFinish(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
@@ -270,7 +276,6 @@ void ThreadFinish(ThreadState *thr) {
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
thr->is_alive = false;
- Context *ctx = CTX();
ctx->thread_registry->FinishThread(thr->tid);
}
@@ -284,33 +289,26 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
}
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
return res;
}
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- Context *ctx = CTX();
ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
- Context *ctx = CTX();
ctx->thread_registry->DetachThread(tid);
}
void ThreadSetName(ThreadState *thr, const char *name) {
- CHECK_GT(thr->in_rtl, 0);
- CTX()->thread_registry->SetThreadName(thr->tid, name);
+ ctx->thread_registry->SetThreadName(thr->tid, name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
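
Note: ThreadStart now checks ctx->after_multithreaded_fork and turns on all ignores for threads created after a multi-threaded fork, complementing the ForkBefore/ForkParentAfter/ForkChildAfter hooks added in tsan_rtl.cc. The patch wires those hooks up through its own fork handling (not shown here); the sketch below only illustrates the conventional prepare/parent/child structure using plain pthread_atfork:

    #include <pthread.h>

    static pthread_mutex_t report_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void ForkBeforeHook()      { pthread_mutex_lock(&report_mtx); }    // quiesce before fork
    static void ForkParentAfterHook() { pthread_mutex_unlock(&report_mtx); }  // parent resumes
    static void ForkChildAfterHook()  { pthread_mutex_unlock(&report_mtx); }  // child re-releases

    void InstallForkHooks() {
      pthread_atfork(ForkBeforeHook, ForkParentAfterHook, ForkChildAfterHook);
    }
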
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 6bc345397adb..e8d3a790b1c7 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -72,6 +72,28 @@ void StatOutput(u64 *stat) {
name[StatSyncAcquire] = " acquired ";
name[StatSyncRelease] = " released ";
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireLarge] = " contains my tid ";
+ name[StatClockAcquireRepeat] = " repeated (fast) ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast1] = " fast1 ";
+ name[StatClockReleaseFast2] = " fast2 ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
name[StatAtomic] = "Atomic operations ";
name[StatAtomicLoad] = " Including load ";
name[StatAtomicStore] = " store ";
@@ -96,338 +118,6 @@ void StatOutput(u64 *stat) {
name[StatAtomic8] = " size 8 ";
name[StatAtomic16] = " size 16 ";
- name[StatInterceptor] = "Interceptors ";
- name[StatInt_longjmp] = " longjmp ";
- name[StatInt_siglongjmp] = " siglongjmp ";
- name[StatInt_malloc] = " malloc ";
- name[StatInt___libc_memalign] = " __libc_memalign ";
- name[StatInt_calloc] = " calloc ";
- name[StatInt_realloc] = " realloc ";
- name[StatInt_free] = " free ";
- name[StatInt_cfree] = " cfree ";
- name[StatInt_malloc_usable_size] = " malloc_usable_size ";
- name[StatInt_mmap] = " mmap ";
- name[StatInt_mmap64] = " mmap64 ";
- name[StatInt_munmap] = " munmap ";
- name[StatInt_memalign] = " memalign ";
- name[StatInt_valloc] = " valloc ";
- name[StatInt_pvalloc] = " pvalloc ";
- name[StatInt_posix_memalign] = " posix_memalign ";
- name[StatInt__Znwm] = " _Znwm ";
- name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
- name[StatInt__Znam] = " _Znam ";
- name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
- name[StatInt__ZdlPv] = " _ZdlPv ";
- name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
- name[StatInt__ZdaPv] = " _ZdaPv ";
- name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
- name[StatInt_strlen] = " strlen ";
- name[StatInt_memset] = " memset ";
- name[StatInt_memcpy] = " memcpy ";
- name[StatInt_textdomain] = " textdomain ";
- name[StatInt_strcmp] = " strcmp ";
- name[StatInt_memchr] = " memchr ";
- name[StatInt_memrchr] = " memrchr ";
- name[StatInt_memmove] = " memmove ";
- name[StatInt_memcmp] = " memcmp ";
- name[StatInt_strchr] = " strchr ";
- name[StatInt_strchrnul] = " strchrnul ";
- name[StatInt_strrchr] = " strrchr ";
- name[StatInt_strncmp] = " strncmp ";
- name[StatInt_strcpy] = " strcpy ";
- name[StatInt_strncpy] = " strncpy ";
- name[StatInt_strstr] = " strstr ";
- name[StatInt_strdup] = " strdup ";
- name[StatInt_strcasecmp] = " strcasecmp ";
- name[StatInt_strncasecmp] = " strncasecmp ";
- name[StatInt_atexit] = " atexit ";
- name[StatInt__exit] = " _exit ";
- name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
- name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
- name[StatInt___cxa_guard_abort] = " __cxa_guard_abort ";
- name[StatInt_pthread_create] = " pthread_create ";
- name[StatInt_pthread_join] = " pthread_join ";
- name[StatInt_pthread_detach] = " pthread_detach ";
- name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
- name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
- name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
- name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
- name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
- name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
- name[StatInt_pthread_spin_init] = " pthread_spin_init ";
- name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
- name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
- name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
- name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
- name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
- name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
- name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
- name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
- name[StatInt_pthread_rwlock_timedrdlock]
- = " pthread_rwlock_timedrdlock ";
- name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
- name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
- name[StatInt_pthread_rwlock_timedwrlock]
- = " pthread_rwlock_timedwrlock ";
- name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
- name[StatInt_pthread_cond_init] = " pthread_cond_init ";
- name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
- name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
- name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
- name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
- name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
- name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
- name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
- name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
- name[StatInt_pthread_once] = " pthread_once ";
- name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
- name[StatInt_pthread_setname_np] = " pthread_setname_np ";
- name[StatInt_sem_init] = " sem_init ";
- name[StatInt_sem_destroy] = " sem_destroy ";
- name[StatInt_sem_wait] = " sem_wait ";
- name[StatInt_sem_trywait] = " sem_trywait ";
- name[StatInt_sem_timedwait] = " sem_timedwait ";
- name[StatInt_sem_post] = " sem_post ";
- name[StatInt_sem_getvalue] = " sem_getvalue ";
- name[StatInt_stat] = " stat ";
- name[StatInt___xstat] = " __xstat ";
- name[StatInt_stat64] = " stat64 ";
- name[StatInt___xstat64] = " __xstat64 ";
- name[StatInt_lstat] = " lstat ";
- name[StatInt___lxstat] = " __lxstat ";
- name[StatInt_lstat64] = " lstat64 ";
- name[StatInt___lxstat64] = " __lxstat64 ";
- name[StatInt_fstat] = " fstat ";
- name[StatInt___fxstat] = " __fxstat ";
- name[StatInt_fstat64] = " fstat64 ";
- name[StatInt___fxstat64] = " __fxstat64 ";
- name[StatInt_open] = " open ";
- name[StatInt_open64] = " open64 ";
- name[StatInt_creat] = " creat ";
- name[StatInt_creat64] = " creat64 ";
- name[StatInt_dup] = " dup ";
- name[StatInt_dup2] = " dup2 ";
- name[StatInt_dup3] = " dup3 ";
- name[StatInt_eventfd] = " eventfd ";
- name[StatInt_signalfd] = " signalfd ";
- name[StatInt_inotify_init] = " inotify_init ";
- name[StatInt_inotify_init1] = " inotify_init1 ";
- name[StatInt_socket] = " socket ";
- name[StatInt_socketpair] = " socketpair ";
- name[StatInt_connect] = " connect ";
- name[StatInt_bind] = " bind ";
- name[StatInt_listen] = " listen ";
- name[StatInt_accept] = " accept ";
- name[StatInt_accept4] = " accept4 ";
- name[StatInt_epoll_create] = " epoll_create ";
- name[StatInt_epoll_create1] = " epoll_create1 ";
- name[StatInt_close] = " close ";
- name[StatInt___close] = " __close ";
- name[StatInt___res_iclose] = " __res_iclose ";
- name[StatInt_pipe] = " pipe ";
- name[StatInt_pipe2] = " pipe2 ";
- name[StatInt_read] = " read ";
- name[StatInt_prctl] = " prctl ";
- name[StatInt_pread] = " pread ";
- name[StatInt_pread64] = " pread64 ";
- name[StatInt_readv] = " readv ";
- name[StatInt_preadv] = " preadv ";
- name[StatInt_preadv64] = " preadv64 ";
- name[StatInt_write] = " write ";
- name[StatInt_pwrite] = " pwrite ";
- name[StatInt_pwrite64] = " pwrite64 ";
- name[StatInt_writev] = " writev ";
- name[StatInt_pwritev] = " pwritev ";
- name[StatInt_pwritev64] = " pwritev64 ";
- name[StatInt_send] = " send ";
- name[StatInt_sendmsg] = " sendmsg ";
- name[StatInt_recv] = " recv ";
- name[StatInt_recvmsg] = " recvmsg ";
- name[StatInt_unlink] = " unlink ";
- name[StatInt_fopen] = " fopen ";
- name[StatInt_freopen] = " freopen ";
- name[StatInt_fclose] = " fclose ";
- name[StatInt_fread] = " fread ";
- name[StatInt_fwrite] = " fwrite ";
- name[StatInt_fflush] = " fflush ";
- name[StatInt_abort] = " abort ";
- name[StatInt_puts] = " puts ";
- name[StatInt_rmdir] = " rmdir ";
- name[StatInt_opendir] = " opendir ";
- name[StatInt_epoll_ctl] = " epoll_ctl ";
- name[StatInt_epoll_wait] = " epoll_wait ";
- name[StatInt_poll] = " poll ";
- name[StatInt_ppoll] = " ppoll ";
- name[StatInt_sigaction] = " sigaction ";
- name[StatInt_signal] = " signal ";
- name[StatInt_sigsuspend] = " sigsuspend ";
- name[StatInt_raise] = " raise ";
- name[StatInt_kill] = " kill ";
- name[StatInt_pthread_kill] = " pthread_kill ";
- name[StatInt_sleep] = " sleep ";
- name[StatInt_usleep] = " usleep ";
- name[StatInt_nanosleep] = " nanosleep ";
- name[StatInt_gettimeofday] = " gettimeofday ";
- name[StatInt_fork] = " fork ";
- name[StatInt_vscanf] = " vscanf ";
- name[StatInt_vsscanf] = " vsscanf ";
- name[StatInt_vfscanf] = " vfscanf ";
- name[StatInt_scanf] = " scanf ";
- name[StatInt_sscanf] = " sscanf ";
- name[StatInt_fscanf] = " fscanf ";
- name[StatInt___isoc99_vscanf] = " vscanf ";
- name[StatInt___isoc99_vsscanf] = " vsscanf ";
- name[StatInt___isoc99_vfscanf] = " vfscanf ";
- name[StatInt___isoc99_scanf] = " scanf ";
- name[StatInt___isoc99_sscanf] = " sscanf ";
- name[StatInt___isoc99_fscanf] = " fscanf ";
- name[StatInt_on_exit] = " on_exit ";
- name[StatInt___cxa_atexit] = " __cxa_atexit ";
- name[StatInt_localtime] = " localtime ";
- name[StatInt_localtime_r] = " localtime_r ";
- name[StatInt_gmtime] = " gmtime ";
- name[StatInt_gmtime_r] = " gmtime_r ";
- name[StatInt_ctime] = " ctime ";
- name[StatInt_ctime_r] = " ctime_r ";
- name[StatInt_asctime] = " asctime ";
- name[StatInt_asctime_r] = " asctime_r ";
- name[StatInt_strptime] = " strptime ";
- name[StatInt_frexp] = " frexp ";
- name[StatInt_frexpf] = " frexpf ";
- name[StatInt_frexpl] = " frexpl ";
- name[StatInt_getpwnam] = " getpwnam ";
- name[StatInt_getpwuid] = " getpwuid ";
- name[StatInt_getgrnam] = " getgrnam ";
- name[StatInt_getgrgid] = " getgrgid ";
- name[StatInt_getpwnam_r] = " getpwnam_r ";
- name[StatInt_getpwuid_r] = " getpwuid_r ";
- name[StatInt_getgrnam_r] = " getgrnam_r ";
- name[StatInt_getgrgid_r] = " getgrgid_r ";
- name[StatInt_clock_getres] = " clock_getres ";
- name[StatInt_clock_gettime] = " clock_gettime ";
- name[StatInt_clock_settime] = " clock_settime ";
- name[StatInt_getitimer] = " getitimer ";
- name[StatInt_setitimer] = " setitimer ";
- name[StatInt_time] = " time ";
- name[StatInt_glob] = " glob ";
- name[StatInt_glob64] = " glob64 ";
- name[StatInt_wait] = " wait ";
- name[StatInt_waitid] = " waitid ";
- name[StatInt_waitpid] = " waitpid ";
- name[StatInt_wait3] = " wait3 ";
- name[StatInt_wait4] = " wait4 ";
- name[StatInt_inet_ntop] = " inet_ntop ";
- name[StatInt_inet_pton] = " inet_pton ";
- name[StatInt_inet_aton] = " inet_aton ";
- name[StatInt_getaddrinfo] = " getaddrinfo ";
- name[StatInt_getnameinfo] = " getnameinfo ";
- name[StatInt_getsockname] = " getsockname ";
- name[StatInt_gethostent] = " gethostent ";
- name[StatInt_gethostbyname] = " gethostbyname ";
- name[StatInt_gethostbyname2] = " gethostbyname2 ";
- name[StatInt_gethostbyaddr] = " gethostbyaddr ";
- name[StatInt_gethostent_r] = " gethostent_r ";
- name[StatInt_gethostbyname_r] = " gethostbyname_r ";
- name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
- name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
- name[StatInt_getsockopt] = " getsockopt ";
- name[StatInt_modf] = " modf ";
- name[StatInt_modff] = " modff ";
- name[StatInt_modfl] = " modfl ";
- name[StatInt_getpeername] = " getpeername ";
- name[StatInt_ioctl] = " ioctl ";
- name[StatInt_sysinfo] = " sysinfo ";
- name[StatInt_readdir] = " readdir ";
- name[StatInt_readdir64] = " readdir64 ";
- name[StatInt_readdir_r] = " readdir_r ";
- name[StatInt_readdir64_r] = " readdir64_r ";
- name[StatInt_ptrace] = " ptrace ";
- name[StatInt_setlocale] = " setlocale ";
- name[StatInt_getcwd] = " getcwd ";
- name[StatInt_get_current_dir_name] = " get_current_dir_name ";
- name[StatInt_strtoimax] = " strtoimax ";
- name[StatInt_strtoumax] = " strtoumax ";
- name[StatInt_mbstowcs] = " mbstowcs ";
- name[StatInt_mbsrtowcs] = " mbsrtowcs ";
- name[StatInt_mbsnrtowcs] = " mbsnrtowcs ";
- name[StatInt_wcstombs] = " wcstombs ";
- name[StatInt_wcsrtombs] = " wcsrtombs ";
- name[StatInt_wcsnrtombs] = " wcsnrtombs ";
- name[StatInt_tcgetattr] = " tcgetattr ";
- name[StatInt_realpath] = " realpath ";
- name[StatInt_canonicalize_file_name] = " canonicalize_file_name ";
- name[StatInt_confstr] = " confstr ";
- name[StatInt_sched_getaffinity] = " sched_getaffinity ";
- name[StatInt_strerror] = " strerror ";
- name[StatInt_strerror_r] = " strerror_r ";
- name[StatInt___xpg_strerror_r] = " __xpg_strerror_r ";
- name[StatInt_scandir] = " scandir ";
- name[StatInt_scandir64] = " scandir64 ";
- name[StatInt_getgroups] = " getgroups ";
- name[StatInt_wordexp] = " wordexp ";
- name[StatInt_sigwait] = " sigwait ";
- name[StatInt_sigwaitinfo] = " sigwaitinfo ";
- name[StatInt_sigtimedwait] = " sigtimedwait ";
- name[StatInt_sigemptyset] = " sigemptyset ";
- name[StatInt_sigfillset] = " sigfillset ";
- name[StatInt_sigpending] = " sigpending ";
- name[StatInt_sigprocmask] = " sigprocmask ";
- name[StatInt_backtrace] = " backtrace ";
- name[StatInt_backtrace_symbols] = " backtrace_symbols ";
- name[StatInt_dlopen] = " dlopen ";
- name[StatInt_dlclose] = " dlclose ";
- name[StatInt_getmntent] = " getmntent ";
- name[StatInt_getmntent_r] = " getmntent_r ";
- name[StatInt_statfs] = " statfs ";
- name[StatInt_statfs64] = " statfs64 ";
- name[StatInt_fstatfs] = " fstatfs ";
- name[StatInt_fstatfs64] = " fstatfs64 ";
- name[StatInt_statvfs] = " statvfs ";
- name[StatInt_statvfs64] = " statvfs64 ";
- name[StatInt_fstatvfs] = " fstatvfs ";
- name[StatInt_fstatvfs64] = " fstatvfs64 ";
- name[StatInt_initgroups] = " initgroups ";
- name[StatInt_ether_ntoa] = " ether_ntoa ";
- name[StatInt_ether_aton] = " ether_aton ";
- name[StatInt_ether_ntoa_r] = " ether_ntoa_r ";
- name[StatInt_ether_aton_r] = " ether_aton_r ";
- name[StatInt_ether_ntohost] = " ether_ntohost ";
- name[StatInt_ether_hostton] = " ether_hostton ";
- name[StatInt_ether_line] = " ether_line ";
- name[StatInt_shmctl] = " shmctl ";
- name[StatInt_random_r] = " random_r ";
- name[StatInt_tmpnam] = " tmpnam ";
- name[StatInt_tmpnam_r] = " tmpnam_r ";
- name[StatInt_tempnam] = " tempnam ";
- name[StatInt_sincos] = " sincos ";
- name[StatInt_sincosf] = " sincosf ";
- name[StatInt_sincosl] = " sincosl ";
- name[StatInt_remquo] = " remquo ";
- name[StatInt_remquof] = " remquof ";
- name[StatInt_remquol] = " remquol ";
- name[StatInt_lgamma] = " lgamma ";
- name[StatInt_lgammaf] = " lgammaf ";
- name[StatInt_lgammal] = " lgammal ";
- name[StatInt_lgamma_r] = " lgamma_r ";
- name[StatInt_lgammaf_r] = " lgammaf_r ";
- name[StatInt_lgammal_r] = " lgammal_r ";
- name[StatInt_drand48_r] = " drand48_r ";
- name[StatInt_lrand48_r] = " lrand48_r ";
- name[StatInt_getline] = " getline ";
- name[StatInt_getdelim] = " getdelim ";
- name[StatInt_iconv] = " iconv ";
- name[StatInt_times] = " times ";
-
- name[StatInt_pthread_attr_getdetachstate] = " pthread_addr_getdetachstate "; // NOLINT
- name[StatInt_pthread_attr_getguardsize] = " pthread_addr_getguardsize "; // NOLINT
- name[StatInt_pthread_attr_getschedparam] = " pthread_addr_getschedparam "; // NOLINT
- name[StatInt_pthread_attr_getschedpolicy] = " pthread_addr_getschedpolicy "; // NOLINT
- name[StatInt_pthread_attr_getinheritsched] = " pthread_addr_getinheritsched "; // NOLINT
- name[StatInt_pthread_attr_getscope] = " pthread_addr_getscope "; // NOLINT
- name[StatInt_pthread_attr_getstacksize] = " pthread_addr_getstacksize "; // NOLINT
- name[StatInt_pthread_attr_getstack] = " pthread_addr_getstack "; // NOLINT
- name[StatInt_pthread_attr_getaffinity_np] = " pthread_addr_getaffinity_np "; // NOLINT
-
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
name[StatAnnotateHappensAfter] = " HappensAfter ";
@@ -475,11 +165,12 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
- Printf("%s: %zu\n", name[i], (uptr)stat[i]);
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 3e08313d1a5f..5bdd9de82132 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -67,6 +67,32 @@ enum StatType {
StatSyncAcquire,
StatSyncRelease,
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireLarge,
+ StatClockAcquireRepeat,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast1,
+ StatClockReleaseFast2,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
// Atomics.
StatAtomic,
StatAtomicLoad,
@@ -92,337 +118,6 @@ enum StatType {
StatAtomic8,
StatAtomic16,
- // Interceptors.
- StatInterceptor,
- StatInt_longjmp,
- StatInt_siglongjmp,
- StatInt_malloc,
- StatInt___libc_memalign,
- StatInt_calloc,
- StatInt_realloc,
- StatInt_free,
- StatInt_cfree,
- StatInt_malloc_usable_size,
- StatInt_mmap,
- StatInt_mmap64,
- StatInt_munmap,
- StatInt_memalign,
- StatInt_valloc,
- StatInt_pvalloc,
- StatInt_posix_memalign,
- StatInt__Znwm,
- StatInt__ZnwmRKSt9nothrow_t,
- StatInt__Znam,
- StatInt__ZnamRKSt9nothrow_t,
- StatInt__ZdlPv,
- StatInt__ZdlPvRKSt9nothrow_t,
- StatInt__ZdaPv,
- StatInt__ZdaPvRKSt9nothrow_t,
- StatInt_strlen,
- StatInt_memset,
- StatInt_memcpy,
- StatInt_textdomain,
- StatInt_strcmp,
- StatInt_memchr,
- StatInt_memrchr,
- StatInt_memmove,
- StatInt_memcmp,
- StatInt_strchr,
- StatInt_strchrnul,
- StatInt_strrchr,
- StatInt_strncmp,
- StatInt_strcpy,
- StatInt_strncpy,
- StatInt_strcasecmp,
- StatInt_strncasecmp,
- StatInt_strstr,
- StatInt_strdup,
- StatInt_atexit,
- StatInt__exit,
- StatInt___cxa_guard_acquire,
- StatInt___cxa_guard_release,
- StatInt___cxa_guard_abort,
- StatInt_pthread_create,
- StatInt_pthread_join,
- StatInt_pthread_detach,
- StatInt_pthread_mutex_init,
- StatInt_pthread_mutex_destroy,
- StatInt_pthread_mutex_lock,
- StatInt_pthread_mutex_trylock,
- StatInt_pthread_mutex_timedlock,
- StatInt_pthread_mutex_unlock,
- StatInt_pthread_spin_init,
- StatInt_pthread_spin_destroy,
- StatInt_pthread_spin_lock,
- StatInt_pthread_spin_trylock,
- StatInt_pthread_spin_unlock,
- StatInt_pthread_rwlock_init,
- StatInt_pthread_rwlock_destroy,
- StatInt_pthread_rwlock_rdlock,
- StatInt_pthread_rwlock_tryrdlock,
- StatInt_pthread_rwlock_timedrdlock,
- StatInt_pthread_rwlock_wrlock,
- StatInt_pthread_rwlock_trywrlock,
- StatInt_pthread_rwlock_timedwrlock,
- StatInt_pthread_rwlock_unlock,
- StatInt_pthread_cond_init,
- StatInt_pthread_cond_destroy,
- StatInt_pthread_cond_signal,
- StatInt_pthread_cond_broadcast,
- StatInt_pthread_cond_wait,
- StatInt_pthread_cond_timedwait,
- StatInt_pthread_barrier_init,
- StatInt_pthread_barrier_destroy,
- StatInt_pthread_barrier_wait,
- StatInt_pthread_once,
- StatInt_pthread_getschedparam,
- StatInt_pthread_setname_np,
- StatInt_sem_init,
- StatInt_sem_destroy,
- StatInt_sem_wait,
- StatInt_sem_trywait,
- StatInt_sem_timedwait,
- StatInt_sem_post,
- StatInt_sem_getvalue,
- StatInt_stat,
- StatInt___xstat,
- StatInt_stat64,
- StatInt___xstat64,
- StatInt_lstat,
- StatInt___lxstat,
- StatInt_lstat64,
- StatInt___lxstat64,
- StatInt_fstat,
- StatInt___fxstat,
- StatInt_fstat64,
- StatInt___fxstat64,
- StatInt_open,
- StatInt_open64,
- StatInt_creat,
- StatInt_creat64,
- StatInt_dup,
- StatInt_dup2,
- StatInt_dup3,
- StatInt_eventfd,
- StatInt_signalfd,
- StatInt_inotify_init,
- StatInt_inotify_init1,
- StatInt_socket,
- StatInt_socketpair,
- StatInt_connect,
- StatInt_bind,
- StatInt_listen,
- StatInt_accept,
- StatInt_accept4,
- StatInt_epoll_create,
- StatInt_epoll_create1,
- StatInt_close,
- StatInt___close,
- StatInt___res_iclose,
- StatInt_pipe,
- StatInt_pipe2,
- StatInt_read,
- StatInt_prctl,
- StatInt_pread,
- StatInt_pread64,
- StatInt_readv,
- StatInt_preadv,
- StatInt_preadv64,
- StatInt_write,
- StatInt_pwrite,
- StatInt_pwrite64,
- StatInt_writev,
- StatInt_pwritev,
- StatInt_pwritev64,
- StatInt_send,
- StatInt_sendmsg,
- StatInt_recv,
- StatInt_recvmsg,
- StatInt_unlink,
- StatInt_fopen,
- StatInt_freopen,
- StatInt_fclose,
- StatInt_fread,
- StatInt_fwrite,
- StatInt_fflush,
- StatInt_abort,
- StatInt_puts,
- StatInt_rmdir,
- StatInt_opendir,
- StatInt_epoll_ctl,
- StatInt_epoll_wait,
- StatInt_poll,
- StatInt_ppoll,
- StatInt_sigaction,
- StatInt_signal,
- StatInt_sigsuspend,
- StatInt_raise,
- StatInt_kill,
- StatInt_pthread_kill,
- StatInt_sleep,
- StatInt_usleep,
- StatInt_nanosleep,
- StatInt_gettimeofday,
- StatInt_fork,
- StatInt_vscanf,
- StatInt_vsscanf,
- StatInt_vfscanf,
- StatInt_scanf,
- StatInt_sscanf,
- StatInt_fscanf,
- StatInt___isoc99_vscanf,
- StatInt___isoc99_vsscanf,
- StatInt___isoc99_vfscanf,
- StatInt___isoc99_scanf,
- StatInt___isoc99_sscanf,
- StatInt___isoc99_fscanf,
- StatInt_on_exit,
- StatInt___cxa_atexit,
- StatInt_localtime,
- StatInt_localtime_r,
- StatInt_gmtime,
- StatInt_gmtime_r,
- StatInt_ctime,
- StatInt_ctime_r,
- StatInt_asctime,
- StatInt_asctime_r,
- StatInt_strptime,
- StatInt_frexp,
- StatInt_frexpf,
- StatInt_frexpl,
- StatInt_getpwnam,
- StatInt_getpwuid,
- StatInt_getgrnam,
- StatInt_getgrgid,
- StatInt_getpwnam_r,
- StatInt_getpwuid_r,
- StatInt_getgrnam_r,
- StatInt_getgrgid_r,
- StatInt_clock_getres,
- StatInt_clock_gettime,
- StatInt_clock_settime,
- StatInt_getitimer,
- StatInt_setitimer,
- StatInt_time,
- StatInt_glob,
- StatInt_glob64,
- StatInt_wait,
- StatInt_waitid,
- StatInt_waitpid,
- StatInt_wait3,
- StatInt_wait4,
- StatInt_inet_ntop,
- StatInt_inet_pton,
- StatInt_inet_aton,
- StatInt_getaddrinfo,
- StatInt_getnameinfo,
- StatInt_getsockname,
- StatInt_gethostent,
- StatInt_gethostbyname,
- StatInt_gethostbyname2,
- StatInt_gethostbyaddr,
- StatInt_gethostent_r,
- StatInt_gethostbyname_r,
- StatInt_gethostbyname2_r,
- StatInt_gethostbyaddr_r,
- StatInt_getsockopt,
- StatInt_modf,
- StatInt_modff,
- StatInt_modfl,
- StatInt_getpeername,
- StatInt_ioctl,
- StatInt_sysinfo,
- StatInt_readdir,
- StatInt_readdir64,
- StatInt_readdir_r,
- StatInt_readdir64_r,
- StatInt_ptrace,
- StatInt_setlocale,
- StatInt_getcwd,
- StatInt_get_current_dir_name,
- StatInt_strtoimax,
- StatInt_strtoumax,
- StatInt_mbstowcs,
- StatInt_mbsrtowcs,
- StatInt_mbsnrtowcs,
- StatInt_wcstombs,
- StatInt_wcsrtombs,
- StatInt_wcsnrtombs,
- StatInt_tcgetattr,
- StatInt_realpath,
- StatInt_canonicalize_file_name,
- StatInt_confstr,
- StatInt_sched_getaffinity,
- StatInt_strerror,
- StatInt_strerror_r,
- StatInt___xpg_strerror_r,
- StatInt_scandir,
- StatInt_scandir64,
- StatInt_getgroups,
- StatInt_wordexp,
- StatInt_sigwait,
- StatInt_sigwaitinfo,
- StatInt_sigtimedwait,
- StatInt_sigemptyset,
- StatInt_sigfillset,
- StatInt_sigpending,
- StatInt_sigprocmask,
- StatInt_backtrace,
- StatInt_backtrace_symbols,
- StatInt_dlopen,
- StatInt_dlclose,
- StatInt_getmntent,
- StatInt_getmntent_r,
- StatInt_statfs,
- StatInt_statfs64,
- StatInt_fstatfs,
- StatInt_fstatfs64,
- StatInt_statvfs,
- StatInt_statvfs64,
- StatInt_fstatvfs,
- StatInt_fstatvfs64,
- StatInt_initgroups,
- StatInt_ether_ntoa,
- StatInt_ether_aton,
- StatInt_ether_ntoa_r,
- StatInt_ether_aton_r,
- StatInt_ether_ntohost,
- StatInt_ether_hostton,
- StatInt_ether_line,
- StatInt_shmctl,
- StatInt_random_r,
- StatInt_tmpnam,
- StatInt_tmpnam_r,
- StatInt_tempnam,
- StatInt_sincos,
- StatInt_sincosf,
- StatInt_sincosl,
- StatInt_remquo,
- StatInt_remquof,
- StatInt_remquol,
- StatInt_lgamma,
- StatInt_lgammaf,
- StatInt_lgammal,
- StatInt_lgamma_r,
- StatInt_lgammaf_r,
- StatInt_lgammal_r,
- StatInt_drand48_r,
- StatInt_lrand48_r,
- StatInt_getline,
- StatInt_getdelim,
- StatInt_iconv,
- StatInt_times,
-
- StatInt_pthread_attr_getdetachstate,
- StatInt_pthread_attr_getguardsize,
- StatInt_pthread_attr_getschedparam,
- StatInt_pthread_attr_getschedpolicy,
- StatInt_pthread_attr_getinheritsched,
- StatInt_pthread_attr_getscope,
- StatInt_pthread_attr_getstacksize,
- StatInt_pthread_attr_getstack,
- StatInt_pthread_attr_getaffinity_np,
-
// Dynamic annotations.
StatAnnotation,
StatAnnotateHappensBefore,
@@ -472,6 +167,7 @@ enum StatType {
StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
+ StatMtxDeadlockDetector,
StatMtxFD,
// This must be the last.
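The counters added above (the StatClock* group and StatMtxDeadlockDetector) only show up in reports because StatOutput() in tsan_stat.cc walks every value up to StatCnt and prints name[i] next to stat[i]; each new enumerator therefore needs a matching, padded entry in the name[] table to line up with the new %16zu column. A standalone sketch of that pairing, with made-up counters and plain printf standing in for the runtime's Printf:

// Illustration only: a StatType enumerator is useful only once the name[]
// table in the output routine has a matching, padded string for it.
#include <cstdio>
#include <cstdint>

enum StatType { StatClockAcquire, StatClockRelease, StatCnt };

void StatOutput(const uint64_t *stat) {
  const char *name[StatCnt] = {};
  name[StatClockAcquire] = "Clock acquire                 ";
  name[StatClockRelease] = "Clock release                 ";
  for (int i = 0; i < StatCnt; i++)
    std::printf("%s: %16zu\n", name[i], (size_t)stat[i]);
}

int main() {
  uint64_t counters[StatCnt] = {42, 7};
  StatOutput(counters);
}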
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index fa0c30dc28c9..ce8d5fe86113 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -101,10 +101,20 @@ SuppressionType conv(ReportType typ) {
return SuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
return SuppressionMutex;
+ else if (typ == ReportTypeMutexDoubleLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadUnlock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadUnlock)
+ return SuppressionMutex;
else if (typ == ReportTypeSignalUnsafe)
return SuppressionSignal;
else if (typ == ReportTypeErrnoInSignal)
return SuppressionNone;
+ else if (typ == ReportTypeDeadlock)
+ return SuppressionDeadlock;
Printf("ThreadSanitizer: unknown report type %d\n", typ),
Die();
}
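The new branches route all mutex-misuse reports to SuppressionMutex and the new deadlock reports to SuppressionDeadlock. The function is a chain of else-if over an enum; the same mapping can also be written as a switch, shown below as a standalone sketch (the enumerator lists are trimmed and the switch form is an illustration, not what the runtime uses):

enum ReportType {
  ReportTypeRace,
  ReportTypeMutexDoubleLock,
  ReportTypeDeadlock
};
enum SuppressionType {
  SuppressionRace,
  SuppressionMutex,
  SuppressionDeadlock,
  SuppressionNone
};

SuppressionType conv(ReportType typ) {
  switch (typ) {
    case ReportTypeRace:            return SuppressionRace;
    case ReportTypeMutexDoubleLock: return SuppressionMutex;
    case ReportTypeDeadlock:        return SuppressionDeadlock;
  }
  return SuppressionNone;  // unreachable for the values above
}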
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index c0e794be2d2e..fa36017846cd 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -24,12 +24,14 @@ void EnterSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(!thr->in_symbolizer);
thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
}
void ExitSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(thr->in_symbolizer);
thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
}
ReportStack *NewReportStackEntry(uptr addr) {
@@ -103,13 +105,11 @@ ReportStack *SymbolizeCode(uptr addr) {
ent->col = col;
return ent;
}
- if (!Symbolizer::Get()->IsAvailable())
- return SymbolizeCodeAddr2Line(addr);
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++)
new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0)
return NewReportStackEntry(addr);
@@ -129,8 +129,6 @@ ReportStack *SymbolizeCode(uptr addr) {
}
ReportLocation *SymbolizeData(uptr addr) {
- if (!Symbolizer::Get()->IsAvailable())
- return 0;
DataInfo info;
if (!Symbolizer::Get()->SymbolizeData(addr, &info))
return 0;
@@ -148,8 +146,6 @@ ReportLocation *SymbolizeData(uptr addr) {
}
void SymbolizeFlush() {
- if (!Symbolizer::Get()->IsAvailable())
- return;
Symbolizer::Get()->Flush();
}
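EnterSymbolizer()/ExitSymbolizer() now also increment and decrement ignore_interceptors, so libc calls made while symbolizing are not traced and cannot re-enter the runtime. The two calls must stay balanced on every path; a hypothetical RAII wrapper (ScopedSymbolizer is not part of the patch) shows how the pairing can be tied to a scope:

// Hypothetical sketch: bind the in_symbolizer/ignore_interceptors pair to a
// scope so early returns cannot leave them unbalanced.
struct ThreadState {
  bool in_symbolizer = false;
  int ignore_interceptors = 0;
};

class ScopedSymbolizer {
 public:
  explicit ScopedSymbolizer(ThreadState *thr) : thr_(thr) {
    thr_->in_symbolizer = true;
    thr_->ignore_interceptors++;
  }
  ~ScopedSymbolizer() {
    thr_->ignore_interceptors--;
    thr_->in_symbolizer = false;
  }
 private:
  ThreadState *thr_;
};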
diff --git a/libsanitizer/tsan/tsan_symbolize.h b/libsanitizer/tsan/tsan_symbolize.h
index 892c11c0667c..0d9077ed379a 100644
--- a/libsanitizer/tsan/tsan_symbolize.h
+++ b/libsanitizer/tsan/tsan_symbolize.h
@@ -22,8 +22,6 @@ ReportStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
void SymbolizeFlush();
-ReportStack *SymbolizeCodeAddr2Line(uptr addr);
-
ReportStack *NewReportStackEntry(uptr addr);
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc b/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
deleted file mode 100644
index c278a42f3178..000000000000
--- a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_symbolize.h"
-#include "tsan_mman.h"
-#include "tsan_rtl.h"
-#include "tsan_platform.h"
-
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <link.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-
-namespace __tsan {
-
-struct ModuleDesc {
- const char *fullname;
- const char *name;
- uptr base;
- int inp_fd;
- int out_fd;
-};
-
-struct SectionDesc {
- SectionDesc *next;
- ModuleDesc *module;
- uptr base;
- uptr end;
-};
-
-struct DlIteratePhdrCtx {
- SectionDesc *sections;
- bool is_first;
-};
-
-static void NOINLINE InitModule(ModuleDesc *m) {
- int outfd[2] = {};
- if (pipe(&outfd[0])) {
- Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
- Die();
- }
- int infd[2] = {};
- if (pipe(&infd[0])) {
- Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
- Die();
- }
- int pid = fork();
- if (pid == 0) {
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0);
- _exit(0);
- } else if (pid < 0) {
- Printf("ThreadSanitizer: failed to fork symbolizer\n");
- Die();
- }
- internal_close(outfd[0]);
- internal_close(infd[1]);
- m->inp_fd = infd[0];
- m->out_fd = outfd[1];
-}
-
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
- InternalScopedBuffer<char> tmp(128);
- if (ctx->is_first) {
- internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
- (int)internal_getpid());
- info->dlpi_name = tmp.data();
- }
- ctx->is_first = false;
- if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
- return 0;
- ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
- sizeof(ModuleDesc));
- m->fullname = internal_strdup(info->dlpi_name);
- m->name = internal_strrchr(m->fullname, '/');
- if (m->name)
- m->name += 1;
- else
- m->name = m->fullname;
- m->base = (uptr)info->dlpi_addr;
- m->inp_fd = -1;
- m->out_fd = -1;
- DPrintf2("Module %s %zx\n", m->name, m->base);
- for (int i = 0; i < info->dlpi_phnum; i++) {
- const Elf64_Phdr *s = &info->dlpi_phdr[i];
- DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
- " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
- (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
- (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
- (uptr)s->p_flags, (uptr)s->p_align);
- if (s->p_type != PT_LOAD)
- continue;
- SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
- sizeof(SectionDesc));
- sec->module = m;
- sec->base = info->dlpi_addr + s->p_vaddr;
- sec->end = sec->base + s->p_memsz;
- sec->next = ctx->sections;
- ctx->sections = sec;
- DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
- }
- return 0;
-}
-
-static SectionDesc *InitSections() {
- DlIteratePhdrCtx ctx = {0, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
- return ctx.sections;
-}
-
-static SectionDesc *GetSectionDesc(uptr addr) {
- static SectionDesc *sections = 0;
- if (sections == 0)
- sections = InitSections();
- for (SectionDesc *s = sections; s; s = s->next) {
- if (addr >= s->base && addr < s->end) {
- if (s->module->inp_fd == -1)
- InitModule(s->module);
- return s;
- }
- }
- return 0;
-}
-
-ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
- SectionDesc *s = GetSectionDesc(addr);
- if (s == 0)
- return NewReportStackEntry(addr);
- ModuleDesc *m = s->module;
- uptr offset = addr - m->base;
- char addrstr[32];
- internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
- if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
- Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
- m->out_fd, errno);
- Die();
- }
- InternalScopedBuffer<char> func(1024);
- ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1);
- if (len <= 0) {
- Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
- m->inp_fd, errno);
- Die();
- }
- func.data()[len] = 0;
- ReportStack *res = NewReportStackEntry(addr);
- res->module = internal_strdup(m->name);
- res->offset = offset;
- char *pos = (char*)internal_strchr(func.data(), '\n');
- if (pos && func[0] != '?') {
- res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1);
- internal_memcpy(res->func, func.data(), pos - func.data());
- res->func[pos - func.data()] = 0;
- char *pos2 = (char*)internal_strchr(pos, ':');
- if (pos2) {
- res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
- internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
- res->file[pos2 - pos - 1] = 0;
- res->line = atoi(pos2 + 1);
- }
- }
- return res;
-}
-
-ReportStack *SymbolizeDataAddr2Line(uptr addr) {
- return 0;
-}
-
-} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_sync.cc b/libsanitizer/tsan/tsan_sync.cc
index 0c5be105f676..f6f2cb731e7a 100644
--- a/libsanitizer/tsan/tsan_sync.cc
+++ b/libsanitizer/tsan/tsan_sync.cc
@@ -15,10 +15,13 @@
namespace __tsan {
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
SyncVar::SyncVar(uptr addr, u64 uid)
: mtx(MutexTypeSyncVar, StatMtxSyncVar)
, addr(addr)
, uid(uid)
+ , creation_stack_id()
, owner_tid(kInvalidTid)
, last_lock()
, recursion()
@@ -60,9 +63,11 @@ SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
SyncVar *res = new(mem) SyncVar(addr, uid);
-#ifndef TSAN_GO
- res->creation_stack_id = CurrentStackId(thr, pc);
-#endif
+ res->creation_stack_id = 0;
+ if (!kGoMode) // Go does not use them
+ res->creation_stack_id = CurrentStackId(thr, pc);
+ if (flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, res);
return res;
}
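Create() now sets creation_stack_id through an ordinary `if (!kGoMode)` instead of `#ifndef TSAN_GO`, and initializes the deadlock detector's per-mutex state only when detect_deadlocks is on. Because kGoMode is a compile-time constant in the runtime, the untaken branch can be folded away while still being type-checked in every build. A standalone sketch of that gating pattern (MY_GO_BUILD and the stub helper are made up for the example):

// Sketch: gate on a constant instead of the preprocessor so both branches stay
// visible to the compiler; the dead one is optimized out.
#ifdef MY_GO_BUILD
constexpr bool kGoMode = true;
#else
constexpr bool kGoMode = false;
#endif

unsigned CurrentStackId() { return 42; }  // stand-in for the real helper

unsigned InitCreationStack() {
  unsigned id = 0;
  if (!kGoMode)               // folded to a plain assignment in C++ builds
    id = CurrentStackId();
  return id;
}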
diff --git a/libsanitizer/tsan/tsan_sync.h b/libsanitizer/tsan/tsan_sync.h
index 2867a8ac79ef..3838df91d757 100644
--- a/libsanitizer/tsan/tsan_sync.h
+++ b/libsanitizer/tsan/tsan_sync.h
@@ -13,14 +13,13 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"
namespace __tsan {
-class SlabCache;
-
class StackTrace {
public:
StackTrace();
@@ -55,8 +54,6 @@ struct SyncVar {
Mutex mtx;
uptr addr;
const u64 uid; // Globally unique id.
- SyncClock clock;
- SyncClock read_clock; // Used for rw mutexes only.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
@@ -66,8 +63,12 @@ struct SyncVar {
bool is_broken;
bool is_linker_init;
SyncVar *next; // In SyncTab hashtable.
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last, so that it is situated on a different cache line
+ // with the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
- uptr GetMemoryConsumption();
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
@@ -96,8 +97,6 @@ class SyncTab {
SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
- uptr GetMemoryConsumption(uptr *nsync);
-
private:
struct Part {
Mutex mtx;
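The SyncVar reshuffle is a layout change only: the patch's own comment says the vector clock is moved to the end so it does not sit on the same cache line as mtx, the field taken on every access to a hot sync object. The runtime relies on ordering alone; the sketch below makes the same hot/cold split explicit with alignment so it can be checked in a static_assert (the struct, sizes, and the 64-byte line are illustrative, not taken from the patch):

#include <cstddef>

// Sketch: keep the frequently-locked field and the bulky, colder state on
// different cache lines so updates to one do not bounce the other.
struct HotColdSplit {
  alignas(64) int hot_lock;          // touched on every operation
  char small_state[40];              // misc hot fields
  alignas(64) char cold_blob[128];   // bulky state, touched less often
};

static_assert(offsetof(HotColdSplit, cold_blob) % 64 == 0,
              "cold part starts on its own cache line");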
diff --git a/libsanitizer/tsan/tsan_vector.h b/libsanitizer/tsan/tsan_vector.h
index 4da8b83d5c3f..f65ad2b55608 100644
--- a/libsanitizer/tsan/tsan_vector.h
+++ b/libsanitizer/tsan/tsan_vector.h
@@ -56,10 +56,18 @@ class Vector {
return begin_[i];
}
- T *PushBack(T v = T()) {
+ T *PushBack() {
EnsureSize(Size() + 1);
- end_[-1] = v;
- return &end_[-1];
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
}
void PopBack() {
@@ -72,7 +80,7 @@ class Vector {
EnsureSize(size);
if (old_size < size) {
for (uptr i = old_size; i < size; i++)
- begin_[i] = T();
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
}
}
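The old `PushBack(T v = T())` default-constructed a temporary and copy-assigned it even when the caller only wanted a fresh slot; the split gives a zero-filling PushBack() and a bitwise-copying PushBack(const T&). Both rely on the element type being trivially copyable, which the runtime's plain-struct payloads appear to satisfy. A hedged usage sketch against a stand-in container (MiniVector and Event are hypothetical; the real Vector manages raw memory through the runtime's internal allocator):

#include <cstring>
#include <vector>   // backing storage for the stand-in only

struct Event { unsigned long pc; int tid; };  // trivially copyable payload

class MiniVector {
 public:
  Event *PushBack() {                 // append a zeroed slot
    buf_.emplace_back();
    std::memset(&buf_.back(), 0, sizeof(Event));  // mirrors the patch
    return &buf_.back();
  }
  Event *PushBack(const Event &v) {   // append a bitwise copy
    buf_.emplace_back();
    std::memcpy(&buf_.back(), &v, sizeof(Event));
    return &buf_.back();
  }
 private:
  std::vector<Event> buf_;
};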
diff --git a/libsanitizer/ubsan/ubsan_diag.cc b/libsanitizer/ubsan/ubsan_diag.cc
index 786ffa7254f5..1dfe7255f683 100644
--- a/libsanitizer/ubsan/ubsan_diag.cc
+++ b/libsanitizer/ubsan/ubsan_diag.cc
@@ -11,6 +11,7 @@
#include "ubsan_diag.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
@@ -19,6 +20,22 @@
using namespace __ubsan;
+static void InitializeSanitizerCommon() {
+ static StaticSpinMutex init_mu;
+ SpinMutexLock l(&init_mu);
+ static bool initialized;
+ if (initialized)
+ return;
+ if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) {
+ // UBSan is run in a standalone mode. Initialize it now.
+ SanitizerToolName = "UndefinedBehaviorSanitizer";
+ CommonFlags *cf = common_flags();
+ SetCommonFlagsDefaults(cf);
+ cf->print_summary = false;
+ }
+ initialized = true;
+}
+
Location __ubsan::getCallerLocation(uptr CallerLoc) {
if (!CallerLoc)
return Location();
@@ -30,9 +47,11 @@ Location __ubsan::getCallerLocation(uptr CallerLoc) {
Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) {
if (!Loc)
return Location();
+ // FIXME: We may need to run initialization earlier.
+ InitializeSanitizerCommon();
AddressInfo Info;
- if (!Symbolizer::GetOrInit()->SymbolizeCode(Loc, &Info, 1) ||
+ if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) ||
!Info.module || !*Info.module)
return Location(Loc);
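InitializeSanitizerCommon() detects standalone UBSan by checking whether SanitizerToolName still holds the default "SanitizerTool" string, and guards the one-time setup with a spin lock and a static flag. The same shape, sketched with standard-library stand-ins (std::mutex replaces StaticSpinMutex, which the runtime prefers since it needs no constructor):

#include <cstring>
#include <mutex>

const char *SanitizerToolName = "SanitizerTool";  // default until a tool claims it

void InitializeStandaloneUBSan() {
  static std::mutex init_mu;                // stand-in for StaticSpinMutex
  std::lock_guard<std::mutex> lock(init_mu);
  static bool initialized = false;
  if (initialized)
    return;
  if (std::strcmp(SanitizerToolName, "SanitizerTool") == 0) {
    // No other sanitizer runtime claimed the process: run standalone.
    SanitizerToolName = "UndefinedBehaviorSanitizer";
  }
  initialized = true;
}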
diff --git a/libsanitizer/ubsan/ubsan_value.h b/libsanitizer/ubsan/ubsan_value.h
index 6ca0f56c99d0..abfd31fbd996 100644
--- a/libsanitizer/ubsan/ubsan_value.h
+++ b/libsanitizer/ubsan/ubsan_value.h
@@ -12,9 +12,9 @@
#ifndef UBSAN_VALUE_H
#define UBSAN_VALUE_H
-// For now, only support linux and darwin. Other platforms should be easy to
-// add, and probably work as-is.
-#if !defined(__linux__) && !defined(__APPLE__)
+// For now, only support Linux, FreeBSD and Darwin. Other platforms should
+// be easy to add, and probably work as-is.
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__)
#error "UBSan not supported for this platform!"
#endif