Diffstat (limited to 'mm/zswap.c')
 mm/zswap.c | 52 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 14 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 3efd8cae315e..104835b379ec 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -36,13 +36,15 @@
#include <linux/pagemap.h>
#include <linux/workqueue.h>
+#include "swap.h"
+
/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
-static u64 zswap_pool_total_size;
+u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
-static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
@@ -186,6 +188,7 @@ struct zswap_entry {
unsigned long handle;
unsigned long value;
};
+ struct obj_cgroup *objcg;
};
struct zswap_header {
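For context: this hunk tacks a cgroup pointer onto zswap's per-entry metadata. A rough sketch of the resulting structure, reconstructed from the surrounding code (field layout is approximate and may differ between kernel versions):

/* Sketch only: zswap's per-entry metadata after this patch (approximate). */
struct zswap_entry {
	struct rb_node rbnode;		/* node in the per-swap-type rbtree */
	pgoff_t offset;			/* swap offset this entry backs */
	int refcount;			/* pins the entry while it is in use */
	unsigned int length;		/* compressed length; 0 if same-filled */
	struct zswap_pool *pool;	/* backing compressed-memory pool */
	union {
		unsigned long handle;	/* zpool handle to the compressed data */
		unsigned long value;	/* the repeated word, if same-filled */
	};
	struct obj_cgroup *objcg;	/* new: cgroup charged for this entry */
};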
@@ -357,6 +360,10 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
*/
static void zswap_free_entry(struct zswap_entry *entry)
{
+ if (entry->objcg) {
+ obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+ obj_cgroup_put(entry->objcg);
+ }
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
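The free path is the mirror image of the store path further down: the charge taken at store time is reversed, and the objcg reference that travelled with the entry is dropped. The lifetime of that reference, paraphrased from this patch (names as in the diff):

/*
 * objcg reference lifetime (paraphrased from this patch):
 *
 *   store:   objcg = get_obj_cgroup_from_page(page);	-> reference taken
 *            obj_cgroup_charge_zswap(objcg, len);	-> memory.zswap charged
 *            entry->objcg = objcg;			-> ref moves into the tree
 *
 *   free:    obj_cgroup_uncharge_zswap(objcg, len);	-> charge reversed
 *            obj_cgroup_put(objcg);			-> reference dropped
 *
 *   reject:  obj_cgroup_put(objcg);			-> dropped, never charged
 */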
@@ -1094,6 +1101,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
struct zswap_entry *entry, *dupentry;
struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx;
+ struct obj_cgroup *objcg = NULL;
+ struct zswap_pool *pool;
int ret;
unsigned int hlen, dlen = PAGE_SIZE;
unsigned long handle, value;
@@ -1113,17 +1122,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
goto reject;
}
+ objcg = get_obj_cgroup_from_page(page);
+ if (objcg && !obj_cgroup_may_zswap(objcg))
+ goto shrink;
+
/* reclaim space if needed */
if (zswap_is_full()) {
- struct zswap_pool *pool;
-
zswap_pool_limit_hit++;
zswap_pool_reached_full = true;
- pool = zswap_pool_last_get();
- if (pool)
- queue_work(shrink_wq, &pool->shrink_work);
- ret = -ENOMEM;
- goto reject;
+ goto shrink;
}
if (zswap_pool_reached_full) {
@@ -1225,6 +1232,13 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
entry->length = dlen;
insert_entry:
+ entry->objcg = objcg;
+ if (objcg) {
+ obj_cgroup_charge_zswap(objcg, entry->length);
+ /* Account before objcg ref is moved to tree */
+ count_objcg_event(objcg, ZSWPOUT);
+ }
+
/* map */
spin_lock(&tree->lock);
do {
@@ -1241,6 +1255,7 @@ insert_entry:
/* update stats */
atomic_inc(&zswap_stored_pages);
zswap_update_total_size();
+ count_vm_event(ZSWPOUT);
return 0;
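With the store accounted, the event is recorded both globally (count_vm_event) and against the owning cgroup (count_objcg_event). A hypothetical userspace spot check, assuming the global counters are exported as "zswpout" and "zswpin" lines in /proc/vmstat:

/* Hypothetical check: print zswap in/out event counters from /proc/vmstat. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "zswpin ", 7) || !strncmp(line, "zswpout ", 8))
			fputs(line, stdout);
	fclose(f);
	return 0;
}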
@@ -1250,7 +1265,16 @@ put_dstmem:
freepage:
zswap_entry_cache_free(entry);
reject:
+ if (objcg)
+ obj_cgroup_put(objcg);
return ret;
+
+shrink:
+ pool = zswap_pool_last_get();
+ if (pool)
+ queue_work(shrink_wq, &pool->shrink_work);
+ ret = -ENOMEM;
+ goto reject;
}
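The new shrink: label folds the two over-limit cases into one exit: failing the per-cgroup check (obj_cgroup_may_zswap) and hitting the global pool limit (zswap_is_full) both kick the asynchronous shrink worker and fall through to reject, which now also drops the objcg reference. Condensed store-path flow after this patch (paraphrased across hunks, not literal code):

	if (objcg && !obj_cgroup_may_zswap(objcg))	/* cgroup zswap limit hit */
		goto shrink;
	if (zswap_is_full())				/* global pool limit hit */
		goto shrink;
	...
shrink:
	pool = zswap_pool_last_get();
	if (pool)
		queue_work(shrink_wq, &pool->shrink_work);	/* async writeback */
	ret = -ENOMEM;
	goto reject;					/* reject drops the objcg ref */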
/*
@@ -1283,11 +1307,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
zswap_fill_page(dst, entry->value);
kunmap_atomic(dst);
ret = 0;
- goto freeentry;
+ goto stats;
}
if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-
tmp = kmalloc(entry->length, GFP_ATOMIC);
if (!tmp) {
ret = -ENOMEM;
@@ -1302,10 +1325,8 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
src += sizeof(struct zswap_header);
if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-
memcpy(tmp, src, entry->length);
src = tmp;
-
zpool_unmap_handle(entry->pool->zpool, entry->handle);
}
@@ -1324,7 +1345,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
kfree(tmp);
BUG_ON(ret);
-
+stats:
+ count_vm_event(ZSWPIN);
+ if (entry->objcg)
+ count_objcg_event(entry->objcg, ZSWPIN);
freeentry:
spin_lock(&tree->lock);
zswap_entry_put(tree, entry);
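On the load side, the same-filled fast path now jumps to the new stats: label instead of freeentry:, so a zswap hit is counted as ZSWPIN whether or not decompression was needed; the per-cgroup event is only counted when the entry carries an objcg. A hypothetical per-cgroup spot check to complement the global /proc/vmstat one above, assuming the objcg events surface as "zswpin" / "zswpout" lines in cgroup v2 memory.stat:

/* Hypothetical check: print per-cgroup zswap events from memory.stat (cgroup v2 root shown). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/fs/cgroup/memory.stat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "zswpin ", 7) || !strncmp(line, "zswpout ", 8))
			fputs(line, stdout);
	fclose(f);
	return 0;
}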