path: root/src/fallback_malloc.cpp
author     Igor Kudrin <ikudrin.dev@gmail.com>   2016-09-29 06:38:06 +0000
committer  Igor Kudrin <ikudrin.dev@gmail.com>   2016-09-29 06:38:06 +0000
commit     feb04d70afb8388150f2ba40df8333ca4c4817ec (patch)
tree       768b51034d36d5bf7d60832406dcbdbc08c74938 /src/fallback_malloc.cpp
parent     a0863319144d468d524c8606e56ebbfb77c2bf71 (diff)
[libc++abi] Use fallback_malloc to allocate __cxa_eh_globals in case of dynamic memory exhaustion.
Throwing an exception for the first time may trigger a call to calloc to allocate memory for __cxa_eh_globals. If dynamic memory is exhausted at that moment, the program terminates abnormally. This patch addresses the issue by using fallback_malloc in that case.

Differential Revision: https://reviews.llvm.org/D17815

git-svn-id: https://llvm.org/svn/llvm-project/libcxxabi/trunk@282692 91177308-0d34-0410-b5e6-96231b3b80d8
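To make the failure mode concrete, here is a condensed, hypothetical sketch of the call path described above. The eh_globals struct and the helper names are illustrative stand-ins for libc++abi internals; only the calloc-then-fallback shape comes from this patch:

    #include <cstddef>
    #include <cstdlib>

    // Stand-in for the patch's __calloc_with_fallback: the real version
    // falls back to a static emergency pool when std::calloc fails.
    static void *calloc_with_fallback(std::size_t count, std::size_t size) {
        void *p = std::calloc(count, size);
        // ... fallback path elided; see the diff below ...
        return p;
    }

    // Simplified per-thread EH state; the real __cxa_eh_globals differs.
    struct eh_globals { void *caughtExceptions; unsigned uncaughtExceptions; };

    // Lazy allocation that runs on a thread's first throw. Before this
    // patch, a NULL result here under memory exhaustion ended the program.
    static eh_globals *get_eh_globals() {
        static thread_local eh_globals *g = NULL;
        if (g == NULL)
            g = static_cast<eh_globals *>(
                calloc_with_fallback(1, sizeof(eh_globals)));
        return g;
    }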
Diffstat (limited to 'src/fallback_malloc.cpp')
-rw-r--r--   src/fallback_malloc.cpp   226
1 file changed, 226 insertions(+), 0 deletions(-)
diff --git a/src/fallback_malloc.cpp b/src/fallback_malloc.cpp
new file mode 100644
index 0000000..a436ed0
--- /dev/null
+++ b/src/fallback_malloc.cpp
@@ -0,0 +1,226 @@
+//===------------------------ fallback_malloc.cpp -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fallback_malloc.h"
+
+#include "config.h"
+
+#include <cstdlib> // for malloc, calloc, free
+#include <cstring> // for memset
+#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
+#include <iostream> // for std::cout in the debug/instrumentation paths
+#endif
+
+#ifndef _LIBCXXABI_HAS_NO_THREADS
+#include <pthread.h> // for mutexes
+#endif
+
+// A small, simple heap manager based (loosely) on
+// the startup heap manager from FreeBSD, optimized for space.
+//
+// Manages a fixed-size memory pool, supports malloc and free only.
+// No support for realloc.
+//
+// Allocates chunks in multiples of four bytes, with a four byte header
+// for each chunk. The overhead of each chunk is kept low by keeping pointers
+// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
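+//
+// A worked example of the layout (assuming the typical case where
+// unsigned short is 2 bytes, so sizeof(heap_node) == 4): the 512 byte
+// heap holds 128 nodes, so every node is addressable by a two byte
+// offset; node_from_offset(5) is heap + 20, and offset_from_node maps
+// that pointer back to 5.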
+
+namespace {
+
+// When POSIX threads are not available, make the mutex operations no-ops
+#ifndef _LIBCXXABI_HAS_NO_THREADS
+static pthread_mutex_t heap_mutex = PTHREAD_MUTEX_INITIALIZER;
+#else
+static void * heap_mutex = 0;
+#endif
+
+class mutexor {
+public:
+#ifndef _LIBCXXABI_HAS_NO_THREADS
+ mutexor ( pthread_mutex_t *m ) : mtx_(m) { pthread_mutex_lock ( mtx_ ); }
+ ~mutexor () { pthread_mutex_unlock ( mtx_ ); }
+#else
+ mutexor ( void * ) {}
+ ~mutexor () {}
+#endif
+private:
+ mutexor ( const mutexor &rhs );
+ mutexor & operator = ( const mutexor &rhs );
+#ifndef _LIBCXXABI_HAS_NO_THREADS
+ pthread_mutex_t *mtx_;
+#endif
+ };
+
+
+static const size_t HEAP_SIZE = 512;
+char heap [ HEAP_SIZE ] __attribute__((aligned));
+
+typedef unsigned short heap_offset;
+typedef unsigned short heap_size;
+
+struct heap_node {
+ heap_offset next_node; // offset into heap
+ heap_size len; // size in units of "sizeof(heap_node)"
+};
+
+static const heap_node *list_end = (heap_node *) ( &heap [ HEAP_SIZE ] ); // one past the end of the heap
+static heap_node *freelist = NULL;
+
+heap_node *node_from_offset ( const heap_offset offset )
+ { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }
+
+heap_offset offset_from_node ( const heap_node *ptr )
+ { return static_cast<heap_offset>(static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap) / sizeof (heap_node)); }
+
+void init_heap () {
+ freelist = (heap_node *) heap;
+ freelist->next_node = offset_from_node ( list_end );
+ freelist->len = HEAP_SIZE / sizeof (heap_node);
+ }
+
+// How big a chunk we allocate: the request rounded up to a whole number
+// of heap_nodes, plus one node for the header (result is in node units)
+size_t alloc_size (size_t len)
+ { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
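+// (Illustrative: with 4-byte nodes, alloc_size(10) == (10+3)/4 + 1 == 4
+// nodes, i.e. one header node plus 12 usable bytes.)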
+
+bool is_fallback_ptr ( void *ptr )
+ { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }
+
+void *fallback_malloc(size_t len) {
+ heap_node *p, *prev;
+ const size_t nelems = alloc_size ( len );
+ mutexor mtx ( &heap_mutex );
+
+ if ( NULL == freelist )
+ init_heap ();
+
+// Walk the free list, looking for a "big enough" chunk
+ for (p = freelist, prev = 0;
+ p && p != list_end; prev = p, p = node_from_offset ( p->next_node)) {
+
+ if (p->len > nelems) { // chunk is larger, shorten, and return the tail
+ heap_node *q;
+
+ p->len = static_cast<heap_size>(p->len - nelems);
+ q = p + p->len;
+ q->next_node = 0;
+ q->len = static_cast<heap_size>(nelems);
+ return (void *) (q + 1);
+ }
+
+ if (p->len == nelems) { // exact size match
+ if (prev == 0)
+ freelist = node_from_offset(p->next_node);
+ else
+ prev->next_node = p->next_node;
+ p->next_node = 0;
+ return (void *) (p + 1);
+ }
+ }
+ return NULL; // couldn't find a spot big enough
+}
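+// (Example walk-through: on a fresh 128-node heap, fallback_malloc(8) needs
+// alloc_size(8) == 3 nodes; the single free chunk is larger, so it is
+// shortened to 125 nodes and the 3-node tail is returned. Allocations are
+// thus carved from the back of a chunk.)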
+
+// Return the start of the next block
+heap_node *after ( struct heap_node *p ) { return p + p->len; }
+
+void fallback_free (void *ptr) {
+ struct heap_node *cp = ((struct heap_node *) ptr) - 1; // retrieve the chunk
+ struct heap_node *p, *prev;
+
+ mutexor mtx ( &heap_mutex );
+
+#ifdef DEBUG_FALLBACK_MALLOC
+ std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
+#endif
+
+ for (p = freelist, prev = 0;
+ p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
+#ifdef DEBUG_FALLBACK_MALLOC
+ std::cout << " p, cp, after (p), after(cp) "
+ << offset_from_node ( p ) << ' '
+ << offset_from_node ( cp ) << ' '
+ << offset_from_node ( after ( p )) << ' '
+ << offset_from_node ( after ( cp )) << std::endl;
+#endif
+ if ( after ( p ) == cp ) {
+#ifdef DEBUG_FALLBACK_MALLOC
+ std::cout << " Appending onto chunk at " << offset_from_node ( p ) << std::endl;
+#endif
+ p->len = static_cast<heap_size>(p->len + cp->len); // make the free heap_node larger
+ return;
+ }
+    else if ( after ( cp ) == p ) { // there's a free heap_node right after
+#ifdef DEBUG_FALLBACK_MALLOC
+        std::cout << "  Merging with free chunk at " << offset_from_node ( p ) << std::endl;
+#endif
+        cp->len = static_cast<heap_size>(cp->len + p->len);
+        cp->next_node = p->next_node; // splice p out of the list; cp takes its place
+        if ( prev == 0 )
+            freelist = cp;
+        else
+            prev->next_node = offset_from_node(cp);
+        return;
+        }
+ }
+// Nothing to merge with, add it to the start of the free list
+#ifdef DEBUG_FALLBACK_MALLOC
+ std::cout << " Making new free list entry " << offset_from_node ( cp ) << std::endl;
+#endif
+ cp->next_node = offset_from_node ( freelist );
+ freelist = cp;
+}
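+// (Continuing the walk-through above: freeing the 3-node tail finds
+// after ( p ) == cp for the 125-node chunk at the heap start, and the two
+// merge back into a single 128-node free chunk.)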
+
+#ifdef INSTRUMENT_FALLBACK_MALLOC
+size_t print_free_list () {
+ struct heap_node *p, *prev;
+ heap_size total_free = 0;
+ if ( NULL == freelist )
+ init_heap ();
+
+ for (p = freelist, prev = 0;
+ p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
+ std::cout << ( prev == 0 ? "" : " ") << "Offset: " << offset_from_node ( p )
+ << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
+ total_free += p->len;
+ }
+ std::cout << "Total Free space: " << total_free << std::endl;
+ return total_free;
+ }
+#endif
+} // end unnamed namespace
+
+namespace __cxxabiv1 {
+
+#pragma GCC visibility push(hidden)
+
+void * __malloc_with_fallback(size_t size) {
+ void *ptr = std::malloc(size);
+ if (NULL == ptr) // if malloc fails, fall back to emergency stash
+ ptr = fallback_malloc(size);
+ return ptr;
+}
+
+void * __calloc_with_fallback(size_t count, size_t size) {
+    void *ptr = std::calloc(count, size);
+    if (NULL != ptr)
+        return ptr;
+    // if calloc fails, fall back to emergency stash -- but guard the
+    // multiplication first, since calloc may have failed precisely
+    // because count * size overflows size_t
+    if (size != 0 && count > (size_t)(-1) / size)
+        return NULL;
+    ptr = fallback_malloc(size * count);
+    if (NULL != ptr)
+        std::memset(ptr, 0, size * count);
+    return ptr;
+}
+
+void __free_with_fallback(void *ptr) {
+ if (is_fallback_ptr(ptr))
+ fallback_free(ptr);
+ else
+ std::free(ptr);
+}
+
+#pragma GCC visibility pop
+
+} // namespace __cxxabiv1
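
For reference, a minimal, hypothetical usage sketch of the three entry points this file adds, assuming it is compiled and linked alongside the caller; the declarations below are assumed to match fallback_malloc.h:

    #include <cstddef>

    namespace __cxxabiv1 {
        void *__malloc_with_fallback(std::size_t size);
        void *__calloc_with_fallback(std::size_t count, std::size_t size);
        void __free_with_fallback(void *ptr);
    }

    int main() {
        // Served by std::malloc in the common case; only under memory
        // exhaustion does the 512-byte emergency pool kick in.
        void *p = __cxxabiv1::__malloc_with_fallback(32);
        void *q = __cxxabiv1::__calloc_with_fallback(4, 8); // zero-filled
        // __free_with_fallback routes pool pointers to fallback_free and
        // everything else to std::free.
        __cxxabiv1::__free_with_fallback(q);
        __cxxabiv1::__free_with_fallback(p);
        return 0;
    }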