summaryrefslogtreecommitdiff
path: root/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/arm/midgard/mali_kbase_mem_pool.c')
-rw-r--r--  drivers/gpu/arm/midgard/mali_kbase_mem_pool.c  | 41
1 files changed, 15 insertions, 26 deletions
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
index 957061893b00..153cd4efac49 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -43,9 +43,6 @@ int __init kbase_carveout_mem_reserve(phys_addr_t size)
kbase_mem_pool_max_size(pool), \
##__VA_ARGS__)
-#define NOT_DIRTY false
-#define NOT_RECLAIMED false
-
static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
spin_lock(&pool->pool_lock);
@@ -264,8 +261,6 @@ static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool,
for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) {
p = kbase_mem_pool_alloc_page(pool);
- if (!p)
- break;
kbase_mem_pool_add(pool, p);
}
@@ -510,7 +505,7 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
return 0;
err_rollback:
- kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+ kbase_mem_pool_free_pages(pool, i, pages, false);
return err;
}
@@ -553,7 +548,7 @@ static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
}
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
- phys_addr_t *pages, bool dirty, bool reclaimed)
+ phys_addr_t *pages, bool dirty)
{
struct kbase_mem_pool *next_pool = pool->next_pool;
struct page *p;
@@ -563,24 +558,22 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
- if (!reclaimed) {
- /* Add to this pool */
- nr_to_pool = kbase_mem_pool_capacity(pool);
- nr_to_pool = min(nr_pages, nr_to_pool);
+ /* Add to this pool */
+ nr_to_pool = kbase_mem_pool_capacity(pool);
+ nr_to_pool = min(nr_pages, nr_to_pool);
- kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
+ kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
- i += nr_to_pool;
+ i += nr_to_pool;
- if (i != nr_pages && next_pool) {
- /* Spill to next pool (may overspill) */
- nr_to_pool = kbase_mem_pool_capacity(next_pool);
- nr_to_pool = min(nr_pages - i, nr_to_pool);
+ if (i != nr_pages && next_pool) {
+ /* Spill to next pool (may overspill) */
+ nr_to_pool = kbase_mem_pool_capacity(next_pool);
+ nr_to_pool = min(nr_pages - i, nr_to_pool);
- kbase_mem_pool_add_array(next_pool, nr_to_pool,
- pages + i, true, dirty);
- i += nr_to_pool;
- }
+ kbase_mem_pool_add_array(next_pool, nr_to_pool, pages + i,
+ true, dirty);
+ i += nr_to_pool;
}
/* Free any remaining pages to kernel */
@@ -589,10 +582,6 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
continue;
p = phys_to_page(pages[i]);
- if (reclaimed)
- zone_page_state_add(-1, page_zone(p),
- NR_SLAB_RECLAIMABLE);
-
kbase_mem_pool_free_page(pool, p);
pages[i] = 0;
}