author     Simon Dardis <simon.dardis@mips.com>   2017-12-22 20:31:07 +0000
committer  Simon Dardis <simon.dardis@mips.com>   2017-12-22 20:31:07 +0000
commit     4d53d6d104c990fb5c2626ddab37a693b6d4f749 (patch)
tree       a4fb2281b53c2adde2b779292465883e1f512ce6 /lib/sanitizer_common/sanitizer_atomic_clang_other.h
parent     2e1adb04781b8eddfe1666727e029f6a05c60131 (diff)
Reland "[mips][compiler-rt] Provide 64bit atomic add and sub"
r318733 introduced a build failure for native MIPS32 systems for xray due to
the lack of __sync_fetch_and_add / __sync_fetch_and_sub support. This patch
extends the existing support, providing those atomics so that xray can be
built successfully.

The initial patch was reverted in r321292, as I suspected it may have caused
the buildbot failure. In fact the test failures were caused by another patch
in the batch of updates the bot fetched, and that patch has since been
reverted.

Reviewers: atanasyan, dberris

Differential Revision: https://reviews.llvm.org/D40385

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@321383 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_atomic_clang_other.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic_clang_other.h | 63
1 file changed, 0 insertions(+), 63 deletions(-)
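
For reference, the technique being relanded is a global test-and-test-and-set
spin lock that serializes each 8-byte read-modify-write on targets without
native 64-bit atomics. A minimal, self-contained sketch of the pattern, kept
separate from the patch below (names here are illustrative, not the
library's; it assumes only the 4-byte GCC/Clang __sync builtins):

#include <stdint.h>

// One global lock serializes every emulated 64-bit operation.
static volatile int emu_lock = 0;

static void emu_spin_lock(volatile int *lock) {
  // __sync_lock_test_and_set acquires the lock; spin on a plain read while
  // it is held so contended waiters do not keep bouncing the cache line.
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void emu_spin_unlock(volatile int *lock) {
  __sync_lock_release(lock);  // release barrier, then stores 0
}

// Emulated 64-bit fetch-and-add: atomic with respect to every other access
// performed under the same lock.
static uint64_t emu_fetch_and_add_64(volatile uint64_t *ptr, uint64_t val) {
  emu_spin_lock(&emu_lock);
  uint64_t old = *ptr;
  *ptr = old + val;
  emu_spin_unlock(&emu_lock);
  return old;
}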
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index d2acc311b..35e2d007e 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,55 +17,6 @@
namespace __sanitizer {
-// MIPS32 does not support atomic > 4 bytes. To address this lack of
-// functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic operations when the size is
-// 8 bytes.
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
-static void __spin_lock(volatile int *lock) {
- while (__sync_lock_test_and_set(lock, 1))
- while (*lock) {
- }
-}
-
-static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
-
-
-// Make sure the lock is on its own cache line to prevent false sharing.
-// Put it inside a struct that is aligned and padded to the typical MIPS
-// cacheline which is 32 bytes.
-static struct {
- int lock;
- char pad[32 - sizeof(int)];
-} __attribute__((aligned(32))) lock = {0};
-
-template <class T>
-T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
- T ret;
-
- __spin_lock(&lock.lock);
-
- ret = *ptr;
- *ptr = ret + val;
-
- __spin_unlock(&lock.lock);
-
- return ret;
-}
-
-template <class T>
-T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
- T ret;
- __spin_lock(&lock.lock);
-
- ret = *ptr;
- if (ret == oldval) *ptr = newval;
-
- __spin_unlock(&lock.lock);
-
- return ret;
-}
-#endif
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
@@ -103,15 +54,8 @@ INLINE typename T::Type atomic_load(
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
- typename T::Type volatile *val_ptr =
- const_cast<typename T::Type volatile *>(&a->val_dont_use);
- v = __mips_sync_fetch_and_add<u64>(
- reinterpret_cast<u64 volatile *>(val_ptr), 0);
-#else
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
-#endif
}
return v;
}
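
Aside: the load path kept above performs a 64-bit load, on 32-bit targets
whose toolchain does provide the 8-byte __sync builtins, by issuing
__sync_fetch_and_add(ptr, 0): the read-modify-write stores the value back
unchanged and returns it, which amounts to an atomic load. A standalone
sketch of the idiom, with illustrative names:

#include <stdint.h>

// Atomic 64-bit load without a native 8-byte load:
// fetch-and-add of zero returns the current value atomically.
static uint64_t load64_via_fetch_add(volatile uint64_t *ptr) {
  return __sync_fetch_and_add(ptr, (uint64_t)0);
}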
@@ -141,14 +85,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
- typename T::Type volatile *val_ptr =
- const_cast<typename T::Type volatile *>(&a->val_dont_use);
- cur = __mips_sync_val_compare_and_swap<u64>(
- reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
-#else
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
-#endif
if (cmp == v)
break;
cmp = cur;
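
The store path in the final hunk emulates a 64-bit store with a
compare-and-swap retry loop: keep proposing v against the last observed
value until the swap lands. A standalone sketch of that pattern, written
with the conventional exit test on the CAS return value (illustrative names,
again assuming the 8-byte __sync builtins are available):

#include <stdint.h>

static void store64_via_cas(volatile uint64_t *ptr, uint64_t v) {
  uint64_t cmp = *ptr;  // guess at the current contents
  for (;;) {
    // Returns the prior value; the swap succeeded iff it equals cmp.
    uint64_t cur = __sync_val_compare_and_swap(ptr, cmp, v);
    if (cur == cmp)
      break;    // expectation matched, so v is now stored
    cmp = cur;  // retry against the freshly observed value
  }
}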