author     ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-01-01 16:13:20 +0000
committer  ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-01-01 16:13:20 +0000
commit     a666b85bd54089c50d9c0577a05afbb6f2dc8b92 (patch)
tree       0ea7c16439b042c659c442ddc5377a98e78a3747 /libbacktrace/mmap.c
parent     edc63b2f7e52e996c1756fdda0d9f80fd46e928b (diff)
PR other/55536
	* mmap.c (backtrace_alloc): Don't call sync functions if not
	threaded.
	(backtrace_free): Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@194768 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libbacktrace/mmap.c')
-rw-r--r--  libbacktrace/mmap.c  24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index d3313c7cf1ec..a6c730ecd23f 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -84,6 +84,7 @@ backtrace_alloc (struct backtrace_state *state,
void *data)
{
void *ret;
+ int locked;
struct backtrace_freelist_struct **pp;
size_t pagesize;
size_t asksize;
@@ -96,7 +97,12 @@ backtrace_alloc (struct backtrace_state *state,
using mmap. __sync_lock_test_and_set returns the old state of
the lock, so we have acquired it if it returns 0. */
- if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+ if (!state->threaded)
+ locked = 1;
+ else
+ locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+ if (locked)
{
for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
{
@@ -120,7 +126,8 @@ backtrace_alloc (struct backtrace_state *state,
}
}
- __sync_lock_release (&state->lock_alloc);
+ if (state->threaded)
+ __sync_lock_release (&state->lock_alloc);
}
if (ret == NULL)
@@ -154,15 +161,24 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
void *data ATTRIBUTE_UNUSED)
{
+ int locked;
+
/* If we can acquire the lock, add the new space to the free list.
If we can't acquire the lock, just leak the memory.
__sync_lock_test_and_set returns the old state of the lock, so we
have acquired it if it returns 0. */
- if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+
+ if (!state->threaded)
+ locked = 1;
+ else
+ locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+ if (locked)
{
backtrace_free_locked (state, addr, size);
- __sync_lock_release (&state->lock_alloc);
+ if (state->threaded)
+ __sync_lock_release (&state->lock_alloc);
}
}
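
Below is a minimal, self-contained sketch of the locking pattern this patch
introduces, using the same GCC __sync builtins.  It is not the libbacktrace
source; the struct and function names (demo_state, demo_with_lock) are
illustrative stand-ins.  The point is that the atomic builtins are only
touched when the state is marked as threaded, so a single-threaded program
never calls the sync functions at all.

#include <stdio.h>

struct demo_state
{
  int threaded;  /* Nonzero if other threads may use this state.  */
  int lock;      /* Simple spinlock word driven by the __sync builtins.  */
};

static void
demo_with_lock (struct demo_state *state, void (*work) (void))
{
  int locked;

  /* Skip the atomic builtins entirely in the single-threaded case,
     which is what the patch above does in backtrace_alloc and
     backtrace_free.  __sync_lock_test_and_set returns the old value
     of the lock, so 0 means we acquired it.  */
  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock, 1) == 0;

  if (locked)
    {
      work ();
      if (state->threaded)
        __sync_lock_release (&state->lock);
    }
}

static void
say_hello (void)
{
  puts ("in the critical section");
}

int
main (void)
{
  struct demo_state state = { 0, 0 };

  demo_with_lock (&state, say_hello);   /* Single-threaded: no __sync calls.  */

  state.threaded = 1;
  demo_with_lock (&state, say_hello);   /* Threaded: the spinlock is used.  */
  return 0;
}

Unlike the real backtrace_free, which simply leaks the memory when the lock
cannot be acquired, this sketch just skips the work; the conditional
acquire/release structure is the part the patch changes.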