Diffstat (limited to 'drivers/gpu/arm/midgard/mali_kbase_vinstr.c')
-rw-r--r--  drivers/gpu/arm/midgard/mali_kbase_vinstr.c  |  401
1 file changed, 59 insertions(+), 342 deletions(-)
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
index bd6095f77480..d3d27e2958d7 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,10 +28,8 @@
#include <linux/wait.h>
#include <mali_kbase.h>
-#include <mali_kbase_hwaccess_instr.h>
#include <mali_kbase_hwcnt_reader.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_tlstream.h>
/*****************************************************************************/
@@ -63,14 +61,6 @@ enum {
JM_HWCNT_BM
};
-enum vinstr_state {
- VINSTR_IDLE,
- VINSTR_DUMPING,
- VINSTR_SUSPENDING,
- VINSTR_SUSPENDED,
- VINSTR_RESUMING
-};
-
/**
* struct kbase_vinstr_context - vinstr context per device
* @lock: protects the entire vinstr context
@@ -84,12 +74,7 @@ enum vinstr_state {
* with hardware
* @reprogram: when true, reprogram hwcnt block with the new set of
* counters
- * @state: vinstr state
- * @state_lock: protects information about vinstr state
- * @suspend_waitq: notification queue to trigger state re-validation
- * @suspend_cnt: reference counter of vinstr's suspend state
- * @suspend_work: worker to execute on entering suspended state
- * @resume_work: worker to execute on leaving suspended state
+ * @suspended: when true, the context has been suspended
* @nclients: number of attached clients, pending or otherwise
* @waiting_clients: head of list of clients being periodically sampled
* @idle_clients: head of list of clients being idle
@@ -109,13 +94,7 @@ struct kbase_vinstr_context {
size_t dump_size;
u32 bitmap[4];
bool reprogram;
-
- enum vinstr_state state;
- struct spinlock state_lock;
- wait_queue_head_t suspend_waitq;
- unsigned int suspend_cnt;
- struct work_struct suspend_work;
- struct work_struct resume_work;
+ bool suspended;
u32 nclients;
struct list_head waiting_clients;
@@ -210,10 +189,7 @@ static const struct file_operations vinstr_client_fops = {
static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
- struct kbase_context *kctx = vinstr_ctx->kctx;
- struct kbase_device *kbdev = kctx->kbdev;
struct kbase_uk_hwcnt_setup setup;
- int err;
setup.dump_buffer = vinstr_ctx->gpu_va;
setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
@@ -221,46 +197,12 @@ static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];
- /* Mark the context as active so the GPU is kept turned on */
- /* A suspend won't happen here, because we're in a syscall from a
- * userspace thread. */
- kbase_pm_context_active(kbdev);
-
- /* Schedule the context in */
- kbasep_js_schedule_privileged_ctx(kbdev, kctx);
- err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &setup);
- if (err) {
- /* Release the context. This had its own Power Manager Active
- * reference */
- kbasep_js_release_privileged_ctx(kbdev, kctx);
-
- /* Also release our Power Manager Active reference */
- kbase_pm_context_idle(kbdev);
- }
-
- return err;
+ return kbase_instr_hwcnt_enable(vinstr_ctx->kctx, &setup);
}
static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
- struct kbase_context *kctx = vinstr_ctx->kctx;
- struct kbase_device *kbdev = kctx->kbdev;
- int err;
-
- err = kbase_instr_hwcnt_disable_internal(kctx);
- if (err) {
- dev_warn(kbdev->dev, "Failed to disable HW counters (ctx:%p)",
- kctx);
- return;
- }
-
- /* Release the context. This had its own Power Manager Active reference. */
- kbasep_js_release_privileged_ctx(kbdev, kctx);
-
- /* Also release our Power Manager Active reference. */
- kbase_pm_context_idle(kbdev);
-
- dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);
+ kbase_instr_hwcnt_disable(vinstr_ctx->kctx);
}
static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
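The rewritten enable path folds the power-management and scheduler bookkeeping behind kbase_instr_hwcnt_enable(). A minimal sketch of what such a wrapper is assumed to look like, reconstructed from the open-coded sequence this hunk removes (not the driver's verbatim implementation; error handling simplified):

/* Assumed shape of kbase_instr_hwcnt_enable(): the same PM-active and
 * privileged-context steps that the hunk above removes from
 * enable_hwcnt(). All calls below appear in the removed code. */
static int sketch_hwcnt_enable(struct kbase_context *kctx,
		struct kbase_uk_hwcnt_setup *setup)
{
	struct kbase_device *kbdev = kctx->kbdev;
	int err;

	/* Keep the GPU powered while counters are being programmed. */
	kbase_pm_context_active(kbdev);
	/* Schedule the context in as privileged so it stays resident. */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);

	err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
	if (err) {
		/* Undo both references on failure. */
		kbasep_js_release_privileged_ctx(kbdev, kctx);
		kbase_pm_context_idle(kbdev);
	}
	return err;
}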
@@ -367,10 +309,6 @@ static void kbasep_vinstr_unmap_kernel_dump_buffer(
*/
static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
{
- struct kbase_device *kbdev = vinstr_ctx->kbdev;
- struct kbasep_kctx_list_element *element;
- unsigned long flags;
- bool enable_backend = false;
int err;
vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true);
@@ -386,48 +324,10 @@ static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
return err;
}
- /* Add kernel context to list of contexts associated with device. */
- element = kzalloc(sizeof(*element), GFP_KERNEL);
- if (element) {
- element->kctx = vinstr_ctx->kctx;
- mutex_lock(&kbdev->kctx_list_lock);
- list_add(&element->link, &kbdev->kctx_list);
-
- /* Inform timeline client about new context.
- * Do this while holding the lock to avoid the tracepoint
- * being created in both the body and summary streams. */
- kbase_tlstream_tl_new_ctx(
- vinstr_ctx->kctx,
- (u32)(vinstr_ctx->kctx->id),
- (u32)(vinstr_ctx->kctx->tgid));
-
- mutex_unlock(&kbdev->kctx_list_lock);
- } else {
- /* Don't treat this as a fail - just warn about it. */
- dev_warn(kbdev->dev,
- "couldn't add kctx to kctx_list\n");
- }
-
- /* Don't enable hardware counters if vinstr is suspended.
- * Note that vinstr resume code is run under vinstr context lock,
- * lower layer will be enabled as needed on resume. */
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- if (VINSTR_IDLE == vinstr_ctx->state)
- enable_backend = true;
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- if (enable_backend)
- err = enable_hwcnt(vinstr_ctx);
-
+ err = enable_hwcnt(vinstr_ctx);
if (err) {
kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
kbase_destroy_context(vinstr_ctx->kctx);
- if (element) {
- mutex_lock(&kbdev->kctx_list_lock);
- list_del(&element->link);
- kfree(element);
- mutex_unlock(&kbdev->kctx_list_lock);
- }
- kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
vinstr_ctx->kctx = NULL;
return err;
}
@@ -440,13 +340,6 @@ static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
disable_hwcnt(vinstr_ctx);
kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
kbase_destroy_context(vinstr_ctx->kctx);
- if (element) {
- mutex_lock(&kbdev->kctx_list_lock);
- list_del(&element->link);
- kfree(element);
- mutex_unlock(&kbdev->kctx_list_lock);
- }
- kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
vinstr_ctx->kctx = NULL;
return -EFAULT;
}
@@ -460,34 +353,11 @@ static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx)
*/
static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx)
{
- struct kbase_device *kbdev = vinstr_ctx->kbdev;
- struct kbasep_kctx_list_element *element;
- struct kbasep_kctx_list_element *tmp;
- bool found = false;
-
/* Release hw counters dumping resources. */
vinstr_ctx->thread = NULL;
disable_hwcnt(vinstr_ctx);
kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
kbase_destroy_context(vinstr_ctx->kctx);
-
- /* Remove kernel context from the device's contexts list. */
- mutex_lock(&kbdev->kctx_list_lock);
- list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
- if (element->kctx == vinstr_ctx->kctx) {
- list_del(&element->link);
- kfree(element);
- found = true;
- }
- }
- mutex_unlock(&kbdev->kctx_list_lock);
-
- if (!found)
- dev_warn(kbdev->dev, "kctx not in kctx_list\n");
-
- /* Inform timeline client about context destruction. */
- kbase_tlstream_tl_del_ctx(vinstr_ctx->kctx);
-
vinstr_ctx->kctx = NULL;
}
@@ -509,10 +379,9 @@ static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
struct kbase_vinstr_client *cli;
KBASE_DEBUG_ASSERT(vinstr_ctx);
-
- if (buffer_count > MAX_BUFFER_COUNT
- || (buffer_count & (buffer_count - 1)))
- return NULL;
+ KBASE_DEBUG_ASSERT(buffer_count >= 0);
+ KBASE_DEBUG_ASSERT(buffer_count <= MAX_BUFFER_COUNT);
+ KBASE_DEBUG_ASSERT(!(buffer_count & (buffer_count - 1)));
cli = kzalloc(sizeof(*cli), GFP_KERNEL);
if (!cli)
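The replacement asserts rely on the standard bit trick for the power-of-two requirement: n & (n - 1) clears the lowest set bit, so the expression is zero only for 0 and exact powers of two. An illustrative helper (the kernel's own is_power_of_2() in <linux/log2.h> encodes the same test with the zero case excluded):

/* Illustration only: why the assert rejects non-power-of-two counts.
 * 8 & 7 == 0 (accepted), 6 & 5 == 4 (rejected); 0 also passes this
 * test, which is why the separate range asserts still matter. */
static inline bool sketch_is_pow2_or_zero(u32 n)
{
	return (n & (n - 1)) == 0;
}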
@@ -566,7 +435,7 @@ static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
/* Allocate required number of dumping buffers. */
cli->dump_buffers = (char *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
+ GFP_KERNEL,
get_order(cli->dump_size * cli->buffer_count));
if (!cli->dump_buffers)
goto error;
@@ -933,7 +802,6 @@ static void kbasep_vinstr_add_dump_request(
static int kbasep_vinstr_collect_and_accumulate(
struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp)
{
- unsigned long flags;
int rcode;
#ifdef CONFIG_MALI_NO_MALI
@@ -941,15 +809,6 @@ static int kbasep_vinstr_collect_and_accumulate(
gpu_model_set_dummy_prfcnt_base_cpu(vinstr_ctx->cpu_va);
#endif
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- if (VINSTR_IDLE != vinstr_ctx->state) {
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- return -EAGAIN;
- } else {
- vinstr_ctx->state = VINSTR_DUMPING;
- }
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
-
/* Request HW counters dump.
* Disable preemption to make dump timestamp more accurate. */
preempt_disable();
@@ -961,21 +820,6 @@ static int kbasep_vinstr_collect_and_accumulate(
rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx);
WARN_ON(rcode);
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- switch (vinstr_ctx->state)
- {
- case VINSTR_SUSPENDING:
- schedule_work(&vinstr_ctx->suspend_work);
- break;
- case VINSTR_DUMPING:
- vinstr_ctx->state = VINSTR_IDLE;
- wake_up_all(&vinstr_ctx->suspend_waitq);
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
-
/* Accumulate values of collected counters. */
if (!rcode)
accum_clients(vinstr_ctx);
@@ -1063,20 +907,6 @@ static int kbasep_vinstr_fill_dump_buffer_kernel(
static void kbasep_vinstr_reprogram(
struct kbase_vinstr_context *vinstr_ctx)
{
- unsigned long flags;
- bool suspended = false;
-
- /* Don't enable hardware counters if vinstr is suspended. */
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- if (VINSTR_IDLE != vinstr_ctx->state)
- suspended = true;
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- if (suspended)
- return;
-
- /* Change to suspended state is done while holding vinstr context
- * lock. The code below will then not re-enable the instrumentation. */
-
if (vinstr_ctx->reprogram) {
struct kbase_vinstr_client *iter;
@@ -1181,7 +1011,6 @@ static int kbasep_vinstr_service_task(void *data)
while (!kthread_should_stop()) {
struct kbase_vinstr_client *cli = NULL;
struct kbase_vinstr_client *tmp;
- int rcode;
u64 timestamp = kbasep_vinstr_get_timestamp();
u64 dump_time = 0;
@@ -1224,8 +1053,7 @@ static int kbasep_vinstr_service_task(void *data)
continue;
}
- rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx,
- &timestamp);
+ kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &timestamp);
INIT_LIST_HEAD(&expired_requests);
@@ -1254,11 +1082,10 @@ static int kbasep_vinstr_service_task(void *data)
/* Expect only periodically sampled clients. */
BUG_ON(0 == cli->dump_interval);
- if (!rcode)
- kbasep_vinstr_update_client(
- cli,
- timestamp,
- BASE_HWCNT_READER_EVENT_PERIODIC);
+ kbasep_vinstr_update_client(
+ cli,
+ timestamp,
+ BASE_HWCNT_READER_EVENT_PERIODIC);
/* Set new dumping time. Drop missed probing times. */
do {
@@ -1387,6 +1214,11 @@ static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
mutex_lock(&vinstr_ctx->lock);
+ if (vinstr_ctx->suspended) {
+ mutex_unlock(&vinstr_ctx->lock);
+ return -EBUSY;
+ }
+
list_del(&cli->list);
cli->dump_interval = interval;
@@ -1623,8 +1455,7 @@ static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp,
struct vm_area_struct *vma)
{
struct kbase_vinstr_client *cli;
- unsigned long size, addr, pfn, offset;
- unsigned long vm_size = vma->vm_end - vma->vm_start;
+ size_t size;
KBASE_DEBUG_ASSERT(filp);
KBASE_DEBUG_ASSERT(vma);
@@ -1633,24 +1464,14 @@ static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp,
KBASE_DEBUG_ASSERT(cli);
size = cli->buffer_count * cli->dump_size;
-
- if (vma->vm_pgoff > (size >> PAGE_SHIFT))
- return -EINVAL;
- if (vm_size > size)
- return -EINVAL;
-
- offset = vma->vm_pgoff << PAGE_SHIFT;
- if ((vm_size + offset) > size)
- return -EINVAL;
-
- addr = __pa((unsigned long)cli->dump_buffers + offset);
- pfn = addr >> PAGE_SHIFT;
+ if (vma->vm_end - vma->vm_start > size)
+ return -ENOMEM;
return remap_pfn_range(
vma,
vma->vm_start,
- pfn,
- vm_size,
+ __pa((unsigned long)cli->dump_buffers) >> PAGE_SHIFT,
+ size,
vma->vm_page_prot);
}
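Both versions ultimately hand remap_pfn_range() a page frame number derived from the physical address of the dump buffers; note that the rewritten version always maps from the start of the buffer region and no longer honors vma->vm_pgoff. A sketch of the arithmetic, with an illustrative address and assuming 4 KiB pages (PAGE_SHIFT == 12):

/* pfn arithmetic used above. __pa() converts a kernel virtual address
 * to a physical address; the page frame number is that address
 * divided by the page size. */
unsigned long addr = __pa((unsigned long)cli->dump_buffers);
unsigned long pfn  = addr >> PAGE_SHIFT;  /* e.g. 0x80100000 -> 0x80100 */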
@@ -1677,84 +1498,6 @@ static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
/*****************************************************************************/
-/**
- * kbasep_vinstr_kick_scheduler - trigger scheduler cycle
- * @kbdev: pointer to kbase device structure
- */
-static void kbasep_vinstr_kick_scheduler(struct kbase_device *kbdev)
-{
- struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
- unsigned long flags;
-
- down(&js_devdata->schedule_sem);
- spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
- kbase_jm_kick_all(kbdev);
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
- up(&js_devdata->schedule_sem);
-}
-
-/**
- * kbasep_vinstr_suspend_worker - worker suspending vinstr module
- * @data: pointer to work structure
- */
-static void kbasep_vinstr_suspend_worker(struct work_struct *data)
-{
- struct kbase_vinstr_context *vinstr_ctx;
- unsigned long flags;
-
- vinstr_ctx = container_of(data, struct kbase_vinstr_context,
- suspend_work);
-
- mutex_lock(&vinstr_ctx->lock);
-
- if (vinstr_ctx->kctx)
- disable_hwcnt(vinstr_ctx);
-
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- vinstr_ctx->state = VINSTR_SUSPENDED;
- wake_up_all(&vinstr_ctx->suspend_waitq);
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
-
- mutex_unlock(&vinstr_ctx->lock);
-
- /* Kick GPU scheduler to allow entering protected mode.
- * This must happen after vinstr was suspended. */
- kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
-}
-
-/**
- * kbasep_vinstr_resume_worker - worker resuming vinstr module
- * @data: pointer to work structure
- */
-static void kbasep_vinstr_resume_worker(struct work_struct *data)
-{
- struct kbase_vinstr_context *vinstr_ctx;
- unsigned long flags;
-
- vinstr_ctx = container_of(data, struct kbase_vinstr_context,
- resume_work);
-
- mutex_lock(&vinstr_ctx->lock);
-
- if (vinstr_ctx->kctx)
- enable_hwcnt(vinstr_ctx);
-
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- vinstr_ctx->state = VINSTR_IDLE;
- wake_up_all(&vinstr_ctx->suspend_waitq);
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
-
- mutex_unlock(&vinstr_ctx->lock);
-
- /* Kick GPU scheduler to allow entering protected mode.
- * Note that the scheduler state machine might have requested re-entry to
- * protected mode before vinstr was resumed.
- * This must happen after vinstr was released.
- kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
-}
-
-/*****************************************************************************/
-
struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
{
struct kbase_vinstr_context *vinstr_ctx;
@@ -1766,14 +1509,8 @@ struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
mutex_init(&vinstr_ctx->lock);
- spin_lock_init(&vinstr_ctx->state_lock);
vinstr_ctx->kbdev = kbdev;
vinstr_ctx->thread = NULL;
- vinstr_ctx->state = VINSTR_IDLE;
- vinstr_ctx->suspend_cnt = 0;
- INIT_WORK(&vinstr_ctx->suspend_work, kbasep_vinstr_suspend_worker);
- INIT_WORK(&vinstr_ctx->resume_work, kbasep_vinstr_resume_worker);
- init_waitqueue_head(&vinstr_ctx->suspend_waitq);
atomic_set(&vinstr_ctx->request_pending, 0);
init_waitqueue_head(&vinstr_ctx->waitq);
@@ -1789,10 +1526,6 @@ void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
if (vinstr_ctx->thread)
kthread_stop(vinstr_ctx->thread);
- /* Wait for workers. */
- flush_work(&vinstr_ctx->suspend_work);
- flush_work(&vinstr_ctx->resume_work);
-
while (1) {
struct list_head *list = &vinstr_ctx->idle_clients;
@@ -1925,6 +1658,11 @@ int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli,
mutex_lock(&vinstr_ctx->lock);
+ if (vinstr_ctx->suspended) {
+ rcode = -EBUSY;
+ goto exit;
+ }
+
if (event_mask & cli->event_mask) {
rcode = kbasep_vinstr_collect_and_accumulate(
vinstr_ctx,
@@ -1960,6 +1698,11 @@ int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli)
mutex_lock(&vinstr_ctx->lock);
+ if (vinstr_ctx->suspended) {
+ rcode = -EBUSY;
+ goto exit;
+ }
+
rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
if (rcode)
goto exit;
@@ -1976,66 +1719,40 @@ exit:
return rcode;
}
-int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx)
+void kbase_vinstr_hwc_suspend(struct kbase_vinstr_context *vinstr_ctx)
{
- unsigned long flags;
- int ret = -EAGAIN;
+ u64 unused;
KBASE_DEBUG_ASSERT(vinstr_ctx);
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- switch (vinstr_ctx->state) {
- case VINSTR_SUSPENDED:
- vinstr_ctx->suspend_cnt++;
- /* overflow shall not happen */
- BUG_ON(0 == vinstr_ctx->suspend_cnt);
- ret = 0;
- break;
-
- case VINSTR_IDLE:
- vinstr_ctx->state = VINSTR_SUSPENDING;
- schedule_work(&vinstr_ctx->suspend_work);
- break;
-
- case VINSTR_DUMPING:
- vinstr_ctx->state = VINSTR_SUSPENDING;
- break;
-
- case VINSTR_SUSPENDING:
- /* fall through */
- case VINSTR_RESUMING:
- break;
-
- default:
- BUG();
- break;
+ mutex_lock(&vinstr_ctx->lock);
+ if (!vinstr_ctx->nclients || vinstr_ctx->suspended) {
+ mutex_unlock(&vinstr_ctx->lock);
+ return;
}
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
-
- return ret;
-}
-void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx)
-{
- wait_event(vinstr_ctx->suspend_waitq,
- (0 == kbase_vinstr_try_suspend(vinstr_ctx)));
+ kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused);
+ vinstr_ctx->suspended = true;
+ vinstr_ctx->suspended_clients = vinstr_ctx->waiting_clients;
+ INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
+ mutex_unlock(&vinstr_ctx->lock);
}
-void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
+void kbase_vinstr_hwc_resume(struct kbase_vinstr_context *vinstr_ctx)
{
- unsigned long flags;
-
KBASE_DEBUG_ASSERT(vinstr_ctx);
- spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- BUG_ON(VINSTR_SUSPENDING == vinstr_ctx->state);
- if (VINSTR_SUSPENDED == vinstr_ctx->state) {
- BUG_ON(0 == vinstr_ctx->suspend_cnt);
- vinstr_ctx->suspend_cnt--;
- if (0 == vinstr_ctx->suspend_cnt) {
- vinstr_ctx->state = VINSTR_RESUMING;
- schedule_work(&vinstr_ctx->resume_work);
- }
+ mutex_lock(&vinstr_ctx->lock);
+ if (!vinstr_ctx->nclients || !vinstr_ctx->suspended) {
+ mutex_unlock(&vinstr_ctx->lock);
+ return;
}
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ vinstr_ctx->suspended = false;
+ vinstr_ctx->waiting_clients = vinstr_ctx->suspended_clients;
+ vinstr_ctx->reprogram = true;
+ kbasep_vinstr_reprogram(vinstr_ctx);
+ atomic_set(&vinstr_ctx->request_pending, 1);
+ wake_up_all(&vinstr_ctx->waitq);
+ mutex_unlock(&vinstr_ctx->lock);
}
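The reworked kbase_vinstr_hwc_suspend()/kbase_vinstr_hwc_resume() pair is meant to bracket GPU power transitions. A hypothetical caller sketch follows; the surrounding function names are illustrative, and kbdev->vinstr_ctx is assumed to be where the device stores its vinstr context:

/* Hypothetical PM hooks showing the intended call order. */
static void sketch_pm_suspend(struct kbase_device *kbdev)
{
	/* Take a final dump and park the periodically sampled clients. */
	kbase_vinstr_hwc_suspend(kbdev->vinstr_ctx);
	/* ... power the GPU down ... */
}

static void sketch_pm_resume(struct kbase_device *kbdev)
{
	/* ... power the GPU back up ... */
	/* Reprogram the counters and wake the parked clients. */
	kbase_vinstr_hwc_resume(kbdev->vinstr_ctx);
}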