-rw-r--r--  include/linux/mm_types.h |  3
-rw-r--r--  include/linux/slab.h     |  9
-rw-r--r--  mm/slab.c                | 68
3 files changed, 41 insertions(+), 39 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..959cb369b197 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -130,6 +130,9 @@ struct page {
struct list_head list; /* slobs list of pages */
struct slab *slab_page; /* slab fields */
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
};
/* Remainder is not double word aligned */
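The new member does not grow struct page: it joins the union that already holds lru/list/slab_page, and the size check added to kmem_cache_init() further down enforces that. For illustration only, a minimal kernel-style sketch of the overlay idea, using a hypothetical demo_desc instead of the real struct page layout:

#include <linux/types.h>
#include <linux/bug.h>

/* Hypothetical descriptor, standing in for struct page: the RCU head
 * shares storage with the list linkage it replaces at free time. */
struct demo_desc {
        union {
                struct list_head lru;           /* valid while the object is live */
                struct rcu_head rcu_head;       /* valid once the free has been queued */
        };
};

/* Refuse to build if the RCU head would not fit over the linkage,
 * mirroring the BUILD_BUG_ON added to kmem_cache_init() below. */
static inline void demo_check_layout(void)
{
        BUILD_BUG_ON(sizeof(((struct demo_desc *)NULL)->lru) <
                     sizeof(struct rcu_head));
}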
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..caaad51fee1f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -51,7 +51,14 @@
* }
* rcu_read_unlock();
*
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * but only if we can be sure that the memory has not meanwhile been reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
*/
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
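The example in the comment above is only partly visible in this hunk; for reference, here is a self-contained sketch of the whole pattern with hypothetical names (demo_obj, demo_cache, demo_lookup_hint), under the assumption that the subsystem never frees an object while its lock is held. demo_lookup_hint() stands in for any lockless lookup, e.g. over an RCU-protected hash table:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object allocated from a SLAB_DESTROY_BY_RCU cache. */
struct demo_obj {
        spinlock_t lock;
        unsigned long key;
        /* ... payload ... */
};

static struct kmem_cache *demo_cache;

/* Constructor: the lock must stay valid across free/realloc recycling. */
static void demo_ctor(void *p)
{
        struct demo_obj *obj = p;

        spin_lock_init(&obj->lock);
}

static int __init demo_init(void)
{
        demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                       0, SLAB_DESTROY_BY_RCU, demo_ctor);
        return demo_cache ? 0 : -ENOMEM;
}

/* Assumed helper: returns a possibly stale pointer found without locks. */
struct demo_obj *demo_lookup_hint(unsigned long key);

/* Returns the object locked, or NULL if it vanished or was recycled. */
static struct demo_obj *demo_get_stable(unsigned long key)
{
        struct demo_obj *obj;

        rcu_read_lock();
        obj = demo_lookup_hint(key);
        if (obj) {
                /*
                 * The grace period keeps the page from going back to the
                 * page allocator, so this is still demo_obj memory and its
                 * ctor-initialized lock is safe to take; the object may
                 * have been recycled, though, so re-check its identity.
                 */
                spin_lock(&obj->lock);
                if (obj->key != key) {
                        spin_unlock(&obj->lock);
                        obj = NULL;
                }
        }
        rcu_read_unlock();

        return obj;
}

Note that SLAB_DESTROY_BY_RCU only defers freeing of the underlying pages; individual objects can be recycled immediately, which is why the identity re-check after taking the lock is mandatory.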
diff --git a/mm/slab.c b/mm/slab.c
index 7e1aabe2b5d8..84c4ed62c10d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -189,25 +189,6 @@ typedef unsigned int kmem_bufctl_t;
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
/*
- * struct slab_rcu
- *
- * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
- * arrange for kmem_freepages to be called via RCU. This is useful if
- * we need to approach a kernel structure obliquely, from its address
- * obtained without the usual locking. We can lock the structure to
- * stabilize it and check it's still at the given address, only if we
- * can be sure that the memory has not been meanwhile reused for some
- * other kind of object (which our subsystem's lock might corrupt).
- *
- * rcu_read_lock before reading the address, then rcu_read_unlock after
- * taking the spinlock within the structure expected at that address.
- */
-struct slab_rcu {
- struct rcu_head head;
- struct page *page;
-};
-
-/*
* struct slab
*
* Manages the objs in a slab. Placed either at the beginning of mem allocated
@@ -215,14 +196,11 @@ struct slab_rcu {
* Slabs are chained into three list: fully used, partial, fully free slabs.
*/
struct slab {
- union {
- struct {
- struct list_head list;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
- };
- struct slab_rcu __slab_cover_slab_rcu;
+ struct {
+ struct list_head list;
+ void *s_mem; /* including colour offset */
+ unsigned int inuse; /* num of objs active in slab */
+ kmem_bufctl_t free;
};
};
@@ -1509,6 +1487,8 @@ void __init kmem_cache_init(void)
{
int i;
+ BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+ sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache);
@@ -1822,12 +1802,13 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- struct kmem_cache *cachep = slab_rcu->page->slab_cache;
+ struct kmem_cache *cachep;
+ struct page *page;
- kmem_freepages(cachep, slab_rcu->page);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ page = container_of(head, struct page, rcu_head);
+ cachep = page->slab_cache;
+
+ kmem_freepages(cachep, page);
}
#if DEBUG
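kmem_rcu_free() now has to work backwards from the only thing RCU hands it, the rcu_head pointer, to the page that embeds it; container_of() is plain pointer arithmetic over offsetof(). A small userspace toy (hypothetical toy_page/toy_rcu_head types and a simplified container_of, not the kernel's) showing just that recovery step:

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel's container_of(): step back from a
 * member pointer to the start of the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_rcu_head {
        void *next;
        void (*func)(struct toy_rcu_head *);
};

struct toy_page {
        unsigned long flags;
        struct toy_rcu_head rcu_head;   /* embedded, like page->rcu_head */
};

int main(void)
{
        struct toy_page page = { .flags = 42 };
        struct toy_rcu_head *head = &page.rcu_head;

        /* This is all the callback needs to get back to the page. */
        struct toy_page *p = container_of(head, struct toy_page, rcu_head);

        printf("flags=%lu, recovered the same object: %s\n",
               p->flags, p == &page ? "yes" : "no");
        return 0;
}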
@@ -2048,16 +2029,27 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
slab_destroy_debugcheck(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
- struct slab_rcu *slab_rcu;
+ struct rcu_head *head;
+
+ /*
+ * RCU free overloads the RCU head over the LRU.
+ * slab_page has also been overloaded over the LRU,
+ * but it is no longer used from this point on, so
+ * the space can be reused safely.
+ */
+ head = (void *)&page->rcu_head;
+ call_rcu(head, kmem_rcu_free);
- slab_rcu = (struct slab_rcu *)slabp;
- slab_rcu->page = page;
- call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
kmem_freepages(cachep, page);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
}
+
+ /*
+ * The slab management structure is not used from here on,
+ * so free it now even though the page may be freed later in RCU context.
+ */
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slabp);
}
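On the destroy side the ordering is the point: the page is queued for a deferred free through its embedded rcu_head (or freed immediately), and the off-slab management structure, which the RCU callback never touches, is freed straight away in either case. A hedged kernel-style sketch of the same shape, using hypothetical names (demo_desc, demo_destroy) rather than the slab internals:

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical descriptor: RCU head embedded, side metadata separate. */
struct demo_desc {
        struct rcu_head rcu_head;
        void *mgmt;             /* analogous to an off-slab slabp */
        bool defer;             /* analogous to SLAB_DESTROY_BY_RCU */
};

static void demo_rcu_free(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_desc, rcu_head));
}

static void demo_destroy(struct demo_desc *desc)
{
        void *mgmt = desc->mgmt;        /* the callback never looks at this */

        if (desc->defer)
                call_rcu(&desc->rcu_head, demo_rcu_free);   /* desc freed after a grace period */
        else
                demo_rcu_free(&desc->rcu_head);             /* desc freed right away */

        /*
         * Nothing after this point needs the descriptor, so the side
         * metadata can be freed immediately in both branches, just as
         * slab_destroy() now frees the off-slab slabp unconditionally.
         */
        kfree(mgmt);
}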
/**