author    Kees Cook <keescook@chromium.org>    2016-06-23 15:24:05 -0700
committer Alex Shi <alex.shi@linaro.org>       2016-08-27 11:23:38 +0800
commit    3ad78bad4fd43467f1fc6dff63076789b30c116b (patch)
tree      486b0bde0d21734f1a3ba5a52803e6204f128074
parent    784bd0f8d7303ff0025e2ea44b68b6fc6323544d (diff)
mm: SLUB hardened usercopy support
Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the
SLUB allocator to catch any copies that may span objects.

Includes a redzone handling fix discovered by Michael Ellerman.

Based on code from PaX and grsecurity.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Laura Abbott <labbott@redhat.com>
(cherry picked from commit ed18adc1cdd00a5c55a20fbdaed4804660772281)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  mm/slub.c    | 40
2 files changed, 41 insertions, 0 deletions
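For context: this patch supplies only the SLUB half of the check. The generic hardened usercopy code (added elsewhere in this series, not in this diff) resolves a kernel pointer to its page and, for slab-backed pages, delegates to __check_heap_object(). A minimal sketch of that caller side, with the control flow assumed from the rest of the series rather than taken from this patch:

/*
 * Sketch of the assumed generic caller (not part of this patch).
 * Returns NULL if the copy is allowed, or the name of the offending
 * cache so the generic code can report it before stopping the copy.
 */
static inline const char *check_heap_object(const void *ptr, unsigned long n)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Only slab memory is checked by the allocator-specific hook. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	return NULL;
}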
diff --git a/init/Kconfig b/init/Kconfig
index fa031a140397..e1d1d6936f92 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1727,6 +1727,7 @@ config SLAB
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
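The select above advertises that SLUB implements the allocator hook. CONFIG_HARDENED_USERCOPY itself is defined elsewhere in the series and gated on that symbol; a sketch of the assumed relationship, not part of this diff:

config HAVE_HARDENED_USERCOPY_ALLOCATOR
	bool

config HARDENED_USERCOPY
	bool "Harden memory copies between kernel and userspace"
	depends on HAVE_HARDENED_USERCOPY_ALLOCATOR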
diff --git a/mm/slub.c b/mm/slub.c
index 65d5f92d51d2..fbadb3753d4d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3585,6 +3585,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *s;
+	unsigned long offset;
+	size_t object_size;
+
+	/* Find object and usable object size. */
+	s = page->slab_cache;
+	object_size = slab_ksize(s);
+
+	/* Reject impossible pointers. */
+	if (ptr < page_address(page))
+		return s->name;
+
+	/* Find offset within object. */
+	offset = (ptr - page_address(page)) % s->size;
+
+	/* Adjust for redzone and reject if within the redzone. */
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+		if (offset < s->red_left_pad)
+			return s->name;
+		offset -= s->red_left_pad;
+	}
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= object_size && n <= object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
 	struct page *page;
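To make the bounds arithmetic in __check_heap_object() concrete, here is a small stand-alone illustration of the same check, using hypothetical cache geometry (stride s->size = 128, usable size slab_ksize() = 96, red_left_pad = 16); all constants are invented for the example:

#include <stdio.h>

/* Hypothetical SLUB cache geometry, for illustration only. */
#define CACHE_STRIDE	128UL	/* s->size: object-to-object stride   */
#define OBJECT_SIZE	96UL	/* slab_ksize(s): usable object size  */
#define RED_LEFT_PAD	16UL	/* s->red_left_pad with SLAB_RED_ZONE */

/* Mirrors the patch's logic: returns 1 if the copy would be rejected. */
static int rejects(unsigned long ptr_off, unsigned long n)
{
	/* Offset of the pointer within its object. */
	unsigned long offset = ptr_off % CACHE_STRIDE;

	/* A pointer inside the left redzone is always an error. */
	if (offset < RED_LEFT_PAD)
		return 1;
	offset -= RED_LEFT_PAD;

	/* Allow only ranges falling entirely within the object. */
	return !(offset <= OBJECT_SIZE && n <= OBJECT_SIZE - offset);
}

int main(void)
{
	printf("%d\n", rejects(16, 96));	/* 0: copies the whole object     */
	printf("%d\n", rejects(48, 80));	/* 1: would run 16 bytes past end */
	printf("%d\n", rejects(8, 4));		/* 1: starts inside the redzone   */
	return 0;
}

The second case fails because the usable offset is 48 - 16 = 32 and 80 > 96 - 32 = 64: the copy would spill into the next object, which is exactly the cross-object overflow this check exists to catch.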