author      Sagar Thakur <sagar.thakur@imgtec.com>    2017-06-19 11:28:59 +0000
committer   Sagar Thakur <sagar.thakur@imgtec.com>    2017-06-19 11:28:59 +0000
commit      80ed4944ba672f0f7a2143f149b59e833b94d2f6 (patch)
tree        063a250aa4b0d15732d04d81e1fdf71b67a50916
parent      366176ff1bf74e8fe330cdc5b712845b94650078 (diff)
[scudo] Enabling MIPS support for Scudo
Adding MIPS 32-bit and 64-bit support for Scudo.

Reviewed by cryptoad, sdardis
Differential: D31803

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305682 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--   cmake/config-ix.cmake                                  2
-rw-r--r--   lib/sanitizer_common/sanitizer_atomic_clang.h         22
-rw-r--r--   lib/sanitizer_common/sanitizer_atomic_clang_other.h   65
-rw-r--r--   test/scudo/random_shuffle.cpp                          2
4 files changed, 83 insertions(+), 8 deletions(-)
diff --git a/cmake/config-ix.cmake b/cmake/config-ix.cmake
index ae2a262a1..0da98b9a4 100644
--- a/cmake/config-ix.cmake
+++ b/cmake/config-ix.cmake
@@ -179,7 +179,7 @@ set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}
set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64})
-set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64})
+set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64})
set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)
if(APPLE)
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index 38363e875..47581282a 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -71,16 +71,26 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
return v;
}
-template<typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a,
- typename T::Type *cmp,
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
Type cmpv = *cmp;
- Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
- if (prev == cmpv)
- return true;
+ Type prev;
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ if (sizeof(*a) == 8) {
+ Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
+ prev = __mips_sync_val_compare_and_swap<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), reinterpret_cast<u64>(cmpv),
+ reinterpret_cast<u64>(xchg));
+ } else {
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ }
+#else
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#endif
+ if (prev == cmpv) return true;
*cmp = prev;
return false;
}
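
A note on the O32 branch above: the O32 ABI has no native 8-byte compare-and-swap, so the patch routes 64-bit CAS through the lock-based __mips_sync_val_compare_and_swap helper and keeps the plain __sync builtin for every other size. The standalone sketch below illustrates that dispatch; the names (spin_lock, lock_based_cas64, cas_strong) and the single bare lock are illustrative simplifications, not the sanitizer's actual symbols.

// Sketch only: simplified stand-ins for the sanitizer's atomic types and helpers.
#include <stdint.h>

static volatile int g_lock = 0;  // single bare spin lock (the patch pads it)

static void spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))  // acquire: atomic test-and-set
    while (*lock) {}                         // spin on plain reads while held
}
static void spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// 64-bit CAS emulated under the lock, the shape of
// __mips_sync_val_compare_and_swap<u64> in the patch.
static uint64_t lock_based_cas64(volatile uint64_t *ptr, uint64_t oldval,
                                 uint64_t newval) {
  spin_lock(&g_lock);
  uint64_t prev = *ptr;
  if (prev == oldval) *ptr = newval;
  spin_unlock(&g_lock);
  return prev;
}

template <class T>
bool cas_strong(volatile T *ptr, T *expected, T desired) {
  T prev;
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
  if (sizeof(T) == 8) {
    // No native 8-byte CAS on O32: take the lock-based path.
    prev = static_cast<T>(lock_based_cas64(
        reinterpret_cast<volatile uint64_t *>(ptr),
        static_cast<uint64_t>(*expected), static_cast<uint64_t>(desired)));
  } else {
    prev = __sync_val_compare_and_swap(ptr, *expected, desired);
  }
#else
  prev = __sync_val_compare_and_swap(ptr, *expected, desired);
#endif
  if (prev == *expected) return true;
  *expected = prev;  // on failure, report the value that was observed
  return false;
}

On 64-bit targets (and on 32-bit targets that do have an 8-byte CAS) the preprocessor drops the emulation branch entirely, which matches the unchanged #else path in the patch.
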
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index 099b9f7ec..2825c7400 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,6 +17,56 @@
namespace __sanitizer {
+// MIPS32 does not support atomic > 4 bytes. To address this lack of
+// functionality, the sanitizer library provides helper methods which use an
+// internal spin lock mechanism to emulate atomic operations when the size is
+// 8 bytes.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+static void __spin_lock(volatile int *lock) {
+ while (__sync_lock_test_and_set(lock, 1))
+ while (*lock) {
+ }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+ int lock;
+ char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0};
+
+template <class T>
+T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
+ T ret;
+
+ __spin_lock(&lock.lock);
+
+ ret = *ptr;
+ *ptr = ret + val;
+
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <class T>
+T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
+ T ret;
+ __spin_lock(&lock.lock);
+
+ ret = *ptr;
+ if (ret == oldval) *ptr = newval;
+
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+#endif
+
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
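
The aligned, padded struct above keeps the emulation lock on a cache line of its own, so taking and releasing the lock never drags unrelated data in and out of other cores' caches (false sharing). The patch spells this with the GCC aligned attribute; a sketch of the same idea in standard C++11, assuming the 32-byte line size named in the comment:

#include <stddef.h>

// Assumed line size, per the comment in the patch ("typical MIPS cacheline").
constexpr size_t kMipsCacheLine = 32;

// The lock word gets a full, aligned cache line to itself, so contention on
// the lock never invalidates a line that also holds unrelated data.
struct alignas(kMipsCacheLine) PaddedSpinLock {
  volatile int lock = 0;
  char pad[kMipsCacheLine - sizeof(int)] = {};
};

static PaddedSpinLock g_atomic_emulation_lock;

static_assert(sizeof(PaddedSpinLock) == kMipsCacheLine,
              "the lock should fill exactly one cache line");
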
@@ -53,8 +103,15 @@ INLINE typename T::Type atomic_load(
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ typename T::Type volatile *val_ptr =
+ const_cast<typename T::Type volatile *>(&a->val_dont_use);
+ v = __mips_sync_fetch_and_add<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), 0);
+#else
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+#endif
}
return v;
}
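
The MIPS path above turns a 64-bit load into __mips_sync_fetch_and_add(ptr, 0): a plain 64-bit load on a 32-bit core is two 32-bit loads and can observe half of a concurrent update, whereas a read-modify-write that adds zero under the emulation lock returns a value that was in memory in one piece. A minimal sketch of the idea, with a std::mutex standing in for the patch's spin lock and hypothetical names:

#include <stdint.h>
#include <mutex>

// A std::mutex stands in for the patch's spin lock here; the point is only
// that every emulated 64-bit access funnels through the same lock.
static std::mutex g_emulation_lock;

// Lock-protected read-modify-write, the shape of __mips_sync_fetch_and_add.
static uint64_t fetch_add64(volatile uint64_t *ptr, uint64_t val) {
  std::lock_guard<std::mutex> guard(g_emulation_lock);
  uint64_t old = *ptr;
  *ptr = old + val;
  return old;
}

// A 64-bit load on a 32-bit core is then fetch_add64(ptr, 0): the value is
// read in one piece under the lock, and adding zero leaves memory unchanged.
static uint64_t load64(volatile uint64_t *ptr) {
  return fetch_add64(ptr, 0);
}
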
@@ -84,7 +141,15 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ typename T::Type volatile *val_ptr =
+ const_cast<typename T::Type volatile *>(&a->val_dont_use);
+ cur = __mips_sync_val_compare_and_swap<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), reinterpret_cast<u64>(cmp),
+ reinterpret_cast<u64>(v));
+#else
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#endif
if (cmp == cur)
break;
cmp = cur;
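
This last hunk leaves the pre-existing 64-bit store strategy in place and reroutes only the CAS inside it: the store is a retry loop that keeps proposing the new value until the compare-and-swap reports that the expected value really was in memory, at which point the new value has landed. A standalone sketch of that loop, written against an assumed cas64() primitive (the lock-based helper on O32, a __sync builtin elsewhere):

#include <stdint.h>

// Assumed primitive: atomically compares *ptr against oldval, writes newval
// on a match, and always returns the value that was observed at *ptr.
static uint64_t cas64(volatile uint64_t *ptr, uint64_t oldval, uint64_t newval) {
  return __sync_val_compare_and_swap(ptr, oldval, newval);
}

// 64-bit store built from CAS: retry until the observed value matches the
// expected one, which means our newval is the value now in memory.
static void store64(volatile uint64_t *ptr, uint64_t v) {
  uint64_t expected = *ptr;  // initial guess; a torn read only costs a retry
  for (;;) {
    uint64_t observed = cas64(ptr, expected, v);
    if (observed == expected)  // the CAS succeeded and v has been stored
      break;
    expected = observed;       // somebody else wrote first: retry with theirs
  }
}
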
diff --git a/test/scudo/random_shuffle.cpp b/test/scudo/random_shuffle.cpp
index 41e67ded6..05a432615 100644
--- a/test/scudo/random_shuffle.cpp
+++ b/test/scudo/random_shuffle.cpp
@@ -7,7 +7,7 @@
// RUN: %run %t 10000 > %T/random_shuffle_tmp_dir/out2
// RUN: not diff %T/random_shuffle_tmp_dir/out?
// RUN: rm -rf %T/random_shuffle_tmp_dir
-// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux
+// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux
// Tests that the allocator shuffles the chunks before returning to the user.
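
For context, random_shuffle.cpp exercises the chunk randomization that Scudo now also gets on MIPS: it allocates a batch of same-sized chunks and prints where they landed, and the RUN lines above diff the output of two runs, which should differ when shuffling is active. A simplified sketch of such a test (hypothetical, not the repository's actual test body, which takes its allocation size from the RUN lines):

// Simplified sketch; hypothetical, not the repository's random_shuffle.cpp.
#include <cstdio>
#include <cstdlib>

int main(int argc, char **argv) {
  // Allocation size comes from the command line in the RUN lines above.
  const int size = argc > 1 ? std::atoi(argv[1]) : 100;
  const int kNumAllocs = 32;
  void *ptrs[kNumAllocs];
  // Grab a batch of chunks from the same size class and print where the
  // allocator put them; with chunk shuffling enabled the order of addresses
  // changes from run to run, so two captured outputs should not diff clean.
  for (int i = 0; i < kNumAllocs; i++) {
    ptrs[i] = std::malloc(size);
    std::printf("%p\n", ptrs[i]);
  }
  for (int i = 0; i < kNumAllocs; i++) std::free(ptrs[i]);
  return 0;
}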