summary | refs | log | tree | commit | diff
path: root/drivers/gpu/arm/midgard/mali_kbase_context.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/arm/midgard/mali_kbase_context.c')
-rw-r--r-- drivers/gpu/arm/midgard/mali_kbase_context.c | 58
1 files changed, 7 insertions, 51 deletions
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/midgard/mali_kbase_context.c
index 344a1f16de8a..798979963937 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_context.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,7 +23,8 @@
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
-#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_instr.h>
+
/**
* kbase_create_context() - Create a kernel base context.
@@ -64,8 +65,6 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kctx->process_mm = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
kctx->slots_pullable = 0;
- kctx->tgid = current->tgid;
- kctx->pid = current->pid;
err = kbase_mem_pool_init(&kctx->mem_pool,
kbdev->mem_pool_max_size_default,
@@ -73,15 +72,11 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (err)
goto free_kctx;
- err = kbase_mem_evictable_init(kctx);
- if (err)
- goto free_pool;
-
atomic_set(&kctx->used_pages, 0);
err = kbase_jd_init(kctx);
if (err)
- goto deinit_evictable;
+ goto free_pool;
err = kbasep_js_kctx_init(kctx);
if (err)
@@ -91,22 +86,16 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (err)
goto free_jd;
- atomic_set(&kctx->drain_pending, 0);
-
mutex_init(&kctx->reg_lock);
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
- spin_lock_init(&kctx->waiting_soft_jobs_lock);
#ifdef CONFIG_KDS
INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif
- err = kbase_dma_fence_init(kctx);
- if (err)
- goto free_event;
err = kbase_mmu_init(kctx);
if (err)
- goto term_dma_fence;
+ goto free_event;
kctx->pgd = kbase_mmu_alloc_pgd(kctx);
if (!kctx->pgd)
@@ -116,6 +105,8 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (!kctx->aliasing_sink_page)
goto no_sink_page;
+ kctx->tgid = current->tgid;
+ kctx->pid = current->pid;
init_waitqueue_head(&kctx->event_queue);
kctx->cookies = KBASE_COOKIE_MASK;
@@ -124,14 +115,6 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
err = kbase_region_tracker_init(kctx);
if (err)
goto no_region_tracker;
-
- err = kbase_sticky_resource_init(kctx);
- if (err)
- goto no_sticky;
-
- err = kbase_jit_init(kctx);
- if (err)
- goto no_jit;
#ifdef CONFIG_GPU_TRACEPOINTS
atomic_set(&kctx->jctx.work_id, 0);
#endif
@@ -143,18 +126,8 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
mutex_init(&kctx->vinstr_cli_lock);
- setup_timer(&kctx->soft_job_timeout,
- kbasep_soft_job_timeout_worker,
- (uintptr_t)kctx);
-
return kctx;
-no_jit:
- kbase_gpu_vm_lock(kctx);
- kbase_sticky_resource_term(kctx);
- kbase_gpu_vm_unlock(kctx);
-no_sticky:
- kbase_region_tracker_term(kctx);
no_region_tracker:
kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
@@ -164,16 +137,12 @@ no_sink_page:
kbase_gpu_vm_unlock(kctx);
free_mmu:
kbase_mmu_term(kctx);
-term_dma_fence:
- kbase_dma_fence_term(kctx);
free_event:
kbase_event_cleanup(kctx);
free_jd:
/* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
kbasep_js_kctx_term(kctx);
kbase_jd_exit(kctx);
-deinit_evictable:
- kbase_mem_evictable_deinit(kctx);
free_pool:
kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
@@ -219,18 +188,8 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_jd_zap_context(kctx);
kbase_event_cleanup(kctx);
- /*
- * JIT must be terminated before the code below as it must be called
- * without the region lock being held.
- * The code above ensures no new JIT allocations can be made by
- * by the time we get to this point of context tear down.
- */
- kbase_jit_term(kctx);
-
kbase_gpu_vm_lock(kctx);
- kbase_sticky_resource_term(kctx);
-
/* MMU is disabled as part of scheduling out the context */
kbase_mmu_free_pgd(kctx);
@@ -260,15 +219,12 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_pm_context_idle(kbdev);
- kbase_dma_fence_term(kctx);
-
kbase_mmu_term(kctx);
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
- kbase_mem_evictable_deinit(kctx);
kbase_mem_pool_term(&kctx->mem_pool);
WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);