Diffstat (limited to 'drivers/gpu/arm/midgard/backend')
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c | 9
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h | 10
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c | 13
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c | 6
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c | 7
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c | 14
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c | 88
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h | 6
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c | 8
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h | 9
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c | 380
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h | 4
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c | 289
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c | 23
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h | 10
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c | 22
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c | 50
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c | 3
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c | 5
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h | 11
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c | 5
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c | 247
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h | 20
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c | 63
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.c | 15
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.h | 6
-rw-r--r--  drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c | 9
27 files changed, 479 insertions, 853 deletions
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
index c6862539c8dd..92a14fa1bae1 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -16,12 +16,7 @@
#include "backend/gpu/mali_kbase_cache_policy_backend.h"
+#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_device_internal.h>
-void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
- u32 mode)
-{
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
- kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL);
-}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
index fe9869109a82..42069fc88a1f 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,13 +22,5 @@
#include "mali_kbase.h"
#include "mali_base_kernel.h"
-/**
- * kbase_cache_set_coherency_mode() - Sets the system coherency mode
- * in the GPU.
- * @kbdev: Device pointer
- * @mode: Coherency mode. COHERENCY_ACE/ACE_LITE
- */
-void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
- u32 mode);
#endif /* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
index ad05fe5bea8d..86227d996257 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -135,14 +135,6 @@ kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
stat->private_data = NULL;
-#ifdef CONFIG_DEVFREQ_THERMAL
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
- if (kbdev->devfreq_cooling)
- memcpy(&kbdev->devfreq_cooling->last_status, stat,
- sizeof(*stat));
-#endif
-#endif
-
return 0;
}
@@ -213,8 +205,7 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
dp = &kbdev->devfreq_profile;
dp->initial_freq = kbdev->current_freq;
- /* .KP : set devfreq_dvfs_interval_in_ms */
- dp->polling_ms = 20;
+ dp->polling_ms = 100;
dp->target = kbase_devfreq_target;
dp->get_dev_status = kbase_devfreq_status;
dp->get_cur_freq = kbase_devfreq_cur_freq;
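/* A minimal sketch, not from this patch: how a profile like the one
 * above is registered with the standard Linux devfreq core. polling_ms
 * is the period at which devfreq re-samples ->get_dev_status(), so the
 * change from 20 ms to 100 ms trades DVFS responsiveness for lower
 * sampling overhead. The helper name and the use of devfreq_add_device()
 * here are illustrative assumptions. */
#include <linux/devfreq.h>
#include <linux/err.h>

static int example_devfreq_register(struct kbase_device *kbdev)
{
	struct devfreq_dev_profile *dp = &kbdev->devfreq_profile;

	dp->initial_freq = kbdev->current_freq;
	dp->polling_ms = 100;			/* DVFS sampling period */
	dp->target = kbase_devfreq_target;	/* apply a new frequency */
	dp->get_dev_status = kbase_devfreq_status;
	dp->get_cur_freq = kbase_devfreq_cur_freq;

	kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
			"simple_ondemand", NULL);
	return IS_ERR(kbdev->devfreq) ? PTR_ERR(kbdev->devfreq) : 0;
}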
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
index b9238a305177..83d5ec9f7a93 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -33,9 +33,7 @@ void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
-
writel(value, kbdev->reg + offset);
-
if (kctx && kctx->jctx.tb)
kbase_device_trace_register_access(kctx, REG_WRITE, offset,
value);
@@ -50,9 +48,7 @@ u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
-
val = readl(kbdev->reg + offset);
-
dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
if (kctx && kctx->jctx.tb)
kbase_device_trace_register_access(kctx, REG_READ, offset, val);
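/* A usage sketch, not from this patch: all backend register traffic
 * goes through the two wrappers above so the per-context trace buffer
 * (jctx.tb) can observe it. The helper below is hypothetical. */
static u32 example_read_gpu_id(struct kbase_device *kbdev)
{
	/* NULL kctx: the access is not made on behalf of a context */
	return kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
}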
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
index d578fd78e825..72a98d0f7952 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,6 +20,7 @@
* Register-based HW access backend APIs
*/
#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_hwaccess_backend.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
@@ -80,6 +81,7 @@ int kbase_backend_late_init(struct kbase_device *kbdev)
if (err)
goto fail_timer;
+/* Currently disabled on the prototype */
#ifdef CONFIG_MALI_DEBUG
#ifndef CONFIG_MALI_NO_MALI
if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
@@ -99,13 +101,12 @@ int kbase_backend_late_init(struct kbase_device *kbdev)
return 0;
fail_job_slot:
-
+/* Currently disabled on the prototype */
#ifdef CONFIG_MALI_DEBUG
#ifndef CONFIG_MALI_NO_MALI
fail_interrupt_test:
#endif /* !CONFIG_MALI_NO_MALI */
#endif /* CONFIG_MALI_DEBUG */
-
kbase_backend_timer_term(kbdev);
fail_timer:
kbase_hwaccess_pm_halt(kbdev);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
index d410cd297889..705b1ebfa87f 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -86,20 +86,8 @@ void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
struct kbase_gpuprops_regdump *regdump)
{
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
- /* Ensure we can access the GPU registers */
- kbase_pm_register_access_enable(kbdev);
-
- regdump->coherency_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
-
- /* We're done accessing the GPU registers for now. */
- kbase_pm_register_access_disable(kbdev);
- } else {
- /* Pre COHERENCY_FEATURES we only supported ACE_LITE */
regdump->coherency_features =
COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
- }
}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
index 3f06a10f7fed..2c987071a77c 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,7 +23,6 @@
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
-#include <mali_kbase_hwaccess_instr.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_instr_internal.h>
@@ -41,6 +40,14 @@ static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
u32 irq_mask;
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Wait for any reset to complete */
+ while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.cache_clean_wait,
+ kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_RESETTING);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
KBASE_INSTR_STATE_REQUEST_CLEAN);
@@ -67,14 +74,18 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
{
unsigned long flags, pm_flags;
int err = -EINVAL;
+ struct kbasep_js_device_data *js_devdata;
u32 irq_mask;
int ret;
u64 shader_cores_needed;
- u32 prfcnt_config;
+
+ KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);
shader_cores_needed = kbase_pm_get_present_cores(kbdev,
KBASE_PM_CORE_SHADER);
+ js_devdata = &kbdev->js_data;
+
/* alignment failure */
if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
goto out_err;
@@ -89,6 +100,14 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
/* Instrumentation is already enabled */
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -106,6 +125,10 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
kbdev->hwcnt.kctx = kctx;
/* Remember the dump address so we can reprogram it later */
kbdev->hwcnt.addr = setup->dump_buffer;
+ /* Remember all the settings for suspend/resume */
+ if (&kbdev->hwcnt.suspended_state != setup)
+ memcpy(&kbdev->hwcnt.suspended_state, setup,
+ sizeof(kbdev->hwcnt.suspended_state));
/* Request the clean */
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
@@ -128,22 +151,9 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
kbase_pm_request_l2_caches(kbdev);
/* Configure */
- prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
- {
- u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
- >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
-
- if (arch_v6)
- prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
- }
-#endif
-
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
- prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
-
+ (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT)
+ | PRFCNT_CONFIG_MODE_OFF, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
setup->dump_buffer & 0xFFFFFFFF, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
@@ -164,7 +174,8 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
setup->tiler_bm, kctx);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
- prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+ (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) |
+ PRFCNT_CONFIG_MODE_MANUAL, kctx);
/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
*/
@@ -174,6 +185,14 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
kbdev->hwcnt.backend.triggered = 1;
wake_up(&kbdev->hwcnt.backend.wait);
@@ -340,11 +359,15 @@ void kbasep_cache_clean_worker(struct work_struct *data)
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
/* Wait for our condition, and any reset to complete */
- while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) {
+ while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING ||
+ kbdev->hwcnt.backend.state ==
+ KBASE_INSTR_STATE_CLEANING) {
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
wait_event(kbdev->hwcnt.backend.cache_clean_wait,
+ (kbdev->hwcnt.backend.state !=
+ KBASE_INSTR_STATE_RESETTING &&
kbdev->hwcnt.backend.state !=
- KBASE_INSTR_STATE_CLEANING);
+ KBASE_INSTR_STATE_CLEANING));
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
}
KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
@@ -377,6 +400,9 @@ void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
&kbdev->hwcnt.backend.cache_clean_work);
KBASE_DEBUG_ASSERT(ret);
}
+	/* NOTE: In the state KBASE_INSTR_STATE_RESETTING, we're in a reset,
+ * and the instrumentation state hasn't been restored yet -
+ * kbasep_reset_timeout_worker() will do the rest of the work */
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
}
@@ -404,6 +430,10 @@ void kbase_clean_caches_done(struct kbase_device *kbdev)
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
}
+	/* NOTE: In the state KBASE_INSTR_STATE_RESETTING, we're in a
+ * reset, and the instrumentation state hasn't been restored yet
+ * - kbasep_reset_timeout_worker() will do the rest of the work
+ */
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
}
@@ -421,6 +451,14 @@ int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
err = -EINVAL;
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
@@ -444,6 +482,14 @@ int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+
/* Check it's the context previously set up and we're not already
* dumping */
if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
index 4794672da8f0..23bd80a5a150 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -39,6 +39,10 @@ enum kbase_instr_state {
/* Cache clean completed, and either a) a dump is complete, or
* b) instrumentation can now be setup. */
KBASE_INSTR_STATE_CLEANED,
+	/* kbasep_reset_timeout_worker() has started (but not completed) a
+ * reset. This generally indicates the current action should be aborted,
+ * and kbasep_reset_timeout_worker() will handle the cleanup */
+ KBASE_INSTR_STATE_RESETTING,
/* An error has occurred during DUMPING (page fault). */
KBASE_INSTR_STATE_FAULT
};
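/* A sketch of the wait pattern this new state enables, mirroring the
 * hunks added to mali_kbase_instr_backend.c above (the helper name is
 * hypothetical). A caller holding hwcnt.lock that observes RESETTING
 * must drop the lock, sleep until the reset worker restores the state
 * and sets 'triggered', then retake the lock and re-check: */
static void example_wait_while_resetting(struct kbase_device *kbdev,
						unsigned long *flags)
{
	while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, *flags);
		wait_event(kbdev->hwcnt.backend.wait,
				kbdev->hwcnt.backend.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, *flags);
	}
}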
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
index b891b12a3299..49c72f90aac6 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,6 +28,7 @@
#define MMU_IRQ_TAG 1
#define GPU_IRQ_TAG 2
+
static void *kbase_tag(void *ptr, u32 tag)
{
return (void *)(((uintptr_t) ptr) | tag);
@@ -38,6 +39,9 @@ static void *kbase_untag(void *ptr)
return (void *)(((uintptr_t) ptr) & ~3);
}
+
+
+
static irqreturn_t kbase_job_irq_handler(int irq, void *data)
{
unsigned long flags;
@@ -147,13 +151,13 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-
static irq_handler_t kbase_handler_table[] = {
[JOB_IRQ_TAG] = kbase_job_irq_handler,
[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
};
+
#ifdef CONFIG_MALI_DEBUG
#define JOB_IRQ_HANDLER JOB_IRQ_TAG
#define MMU_IRQ_HANDLER MMU_IRQ_TAG
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
index 83d477898c5e..8ccc440171a2 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -71,7 +71,6 @@ struct slot_rb {
* @reset_work: Work item for performing the reset
* @reset_wait: Wait event signalled when the reset is complete
* @reset_timer: Timeout for soft-stops before the reset
- * @timeouts_updated: Have timeout values just been updated?
*
* The kbasep_js_device_data::runpool_irq::lock (a spinlock) must be held when
* accessing this structure
@@ -98,15 +97,11 @@ struct kbase_backend_data {
/* The GPU reset process is currently occurring (timeout has expired or
* kbasep_try_reset_gpu_early was called) */
#define KBASE_RESET_GPU_HAPPENING 3
-/* Reset the GPU silently, used when resetting the GPU as part of normal
- * behavior (e.g. when exiting protected mode). */
-#define KBASE_RESET_GPU_SILENT 4
+
struct workqueue_struct *reset_workq;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
struct hrtimer reset_timer;
-
- bool timeouts_updated;
};
/**
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
index 00900a99a898..33d6aef0ec72 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -15,6 +15,8 @@
+
+
/*
* Base kernel job manager APIs
*/
@@ -25,9 +27,11 @@
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include <mali_kbase_gator.h>
#endif
+#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
-#include <mali_kbase_vinstr.h>
+#endif
#include <mali_kbase_hw.h>
+#include <mali_kbase_config_defaults.h>
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
@@ -83,31 +87,14 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
* start */
cfg = kctx->as_nr;
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
- cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
-
#ifndef CONFIG_MALI_COH_GPU
- if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
- cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
- else
- cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
-
- if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END))
- cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
- else
- cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
-#endif /* CONFIG_MALI_COH_GPU */
-
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649) ||
- !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3982))
- cfg |= JS_CONFIG_START_MMU;
+ cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+ cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+#endif
+ cfg |= JS_CONFIG_START_MMU;
cfg |= JS_CONFIG_THREAD_PRI(8);
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
- (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
- cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
-
if (kbase_hw_has_feature(kbdev,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
@@ -124,9 +111,6 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
- kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
- katom->flush_id, kctx);
/* Write an approximate start timestamp.
* It's approximate because there might be a job in the HEAD register.
@@ -146,6 +130,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
kctx, kbase_jd_atom_id(kctx, katom));
#endif
+#if defined(CONFIG_MALI_MIPE_ENABLED)
kbase_tlstream_tl_attrib_atom_config(katom, jc_head,
katom->affinity, cfg);
kbase_tlstream_tl_ret_ctx_lpu(
@@ -157,6 +142,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
katom,
&kbdev->gpu_props.props.raw_props.js_features[js],
"ctx_nr,atom_nr");
+#endif
#ifdef CONFIG_GPU_TRACEPOINTS
if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
/* If this is the only job on the slot, trace it as starting */
@@ -218,24 +204,6 @@ static void kbasep_job_slot_update_head_start_timestamp(
}
}
-/**
- * kbasep_trace_tl_nret_atom_lpu - Call nret_atom_lpu timeline tracepoint
- * @kbdev: kbase device
- * @i: job slot
- *
- * Get kbase atom by calling kbase_gpu_inspect for given job slot.
- * Then use obtained katom and name of slot associated with the given
- * job slot number in tracepoint call to the instrumentation module
- * informing that given atom is no longer executed on given lpu (job slot).
- */
-static void kbasep_trace_tl_nret_atom_lpu(struct kbase_device *kbdev, int i)
-{
- struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, i, 0);
-
- kbase_tlstream_tl_nret_atom_lpu(katom,
- &kbdev->gpu_props.props.raw_props.js_features[i]);
-}
-
void kbase_job_done(struct kbase_device *kbdev, u32 done)
{
unsigned long flags;
@@ -296,12 +264,9 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
GATOR_JOB_SLOT_SOFT_STOPPED, i),
NULL, 0);
#endif
-
+#if defined(CONFIG_MALI_MIPE_ENABLED)
kbase_tlstream_aux_job_softstop(i);
-
- kbasep_trace_tl_nret_atom_lpu(
- kbdev, i);
-
+#endif
/* Soft-stopped job - read the value of
* JS<n>_TAIL so that the job chain can
* be resumed */
@@ -472,21 +437,19 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
KBASE_EXPORT_TEST_API(kbase_job_done);
static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
- struct kbase_jd_atom *katom)
+ u16 core_reqs)
{
bool soft_stops_allowed = true;
- if (kbase_jd_katom_is_protected(katom)) {
- soft_stops_allowed = false;
- } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
- if ((katom->core_req & BASE_JD_REQ_T) != 0)
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+ if ((core_reqs & BASE_JD_REQ_T) != 0)
soft_stops_allowed = false;
}
return soft_stops_allowed;
}
static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
- base_jd_core_req core_reqs)
+ u16 core_reqs)
{
bool hard_stops_allowed = true;
@@ -500,7 +463,7 @@ static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
int js,
u32 action,
- base_jd_core_req core_reqs,
+ u16 core_reqs,
struct kbase_jd_atom *target_katom)
{
struct kbase_context *kctx = target_katom->kctx;
@@ -523,13 +486,12 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
if (action == JS_COMMAND_SOFT_STOP) {
bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
- target_katom);
+ core_reqs);
if (!soft_stop_allowed) {
#ifdef CONFIG_MALI_DEBUG
- dev_dbg(kbdev->dev,
- "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
- (unsigned int)core_reqs);
+ dev_dbg(kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
#endif /* CONFIG_MALI_DEBUG */
return;
}
@@ -537,51 +499,9 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
/* We are about to issue a soft stop, so mark the atom as having
* been soft stopped */
target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+ }
- /* Mark the point where we issue the soft-stop command */
- kbase_tlstream_aux_issue_job_softstop(target_katom);
-
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
- int i;
-
- for (i = 0;
- i < kbase_backend_nr_atoms_submitted(kbdev, js);
- i++) {
- struct kbase_jd_atom *katom;
-
- katom = kbase_gpu_inspect(kbdev, js, i);
-
- KBASE_DEBUG_ASSERT(katom);
-
- /* For HW_ISSUE_8316, only 'bad' jobs attacking
- * the system can cause this issue: normally,
- * all memory should be allocated in multiples
- * of 4 pages, and growable memory should be
- * changed size in multiples of 4 pages.
- *
- * Whilst such 'bad' jobs can be cleared by a
- * GPU reset, the locking up of a uTLB entry
- * caused by the bad job could also stall other
- * ASs, meaning that other ASs' jobs don't
- * complete in the 'grace' period before the
- * reset. We don't want to lose other ASs' jobs
- * when they would normally complete fine, so we
- * must 'poke' the MMU regularly to help other
- * ASs complete */
- kbase_as_poking_timer_retain_atom(
- kbdev, katom->kctx, katom);
- }
- }
-
- if (kbase_hw_has_feature(
- kbdev,
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
- action = (target_katom->atom_flags &
- KBASE_KATOM_FLAGS_JOBCHAIN) ?
- JS_COMMAND_SOFT_STOP_1 :
- JS_COMMAND_SOFT_STOP_0;
- }
- } else if (action == JS_COMMAND_HARD_STOP) {
+ if (action == JS_COMMAND_HARD_STOP) {
bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
core_reqs);
@@ -605,21 +525,55 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
* hard-stop fails, so it is safe to just return and
* ignore the hard-stop request.
*/
- dev_warn(kbdev->dev,
- "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
- (unsigned int)core_reqs);
+ dev_warn(kbdev->dev, "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
+ (unsigned int)core_reqs);
return;
}
target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316) &&
+ action == JS_COMMAND_SOFT_STOP) {
+ int i;
- if (kbase_hw_has_feature(
- kbdev,
+ for (i = 0; i < kbase_backend_nr_atoms_submitted(kbdev, js);
+ i++) {
+ struct kbase_jd_atom *katom;
+
+ katom = kbase_gpu_inspect(kbdev, js, i);
+
+ KBASE_DEBUG_ASSERT(katom);
+
+ /* For HW_ISSUE_8316, only 'bad' jobs attacking the
+ * system can cause this issue: normally, all memory
+ * should be allocated in multiples of 4 pages, and
+ * growable memory should be changed size in multiples
+ * of 4 pages.
+ *
+ * Whilst such 'bad' jobs can be cleared by a GPU reset,
+ * the locking up of a uTLB entry caused by the bad job
+ * could also stall other ASs, meaning that other ASs'
+ * jobs don't complete in the 'grace' period before the
+ * reset. We don't want to lose other ASs' jobs when
+ * they would normally complete fine, so we must 'poke'
+ * the MMU regularly to help other ASs complete */
+ kbase_as_poking_timer_retain_atom(kbdev, katom->kctx,
+ katom);
+ }
+ }
+
+ if (kbase_hw_has_feature(kbdev,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+ if (action == JS_COMMAND_SOFT_STOP)
action = (target_katom->atom_flags &
- KBASE_KATOM_FLAGS_JOBCHAIN) ?
- JS_COMMAND_HARD_STOP_1 :
- JS_COMMAND_HARD_STOP_0;
- }
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_SOFT_STOP_1 :
+ JS_COMMAND_SOFT_STOP_0;
+ else
+ action = (target_katom->atom_flags &
+ KBASE_KATOM_FLAGS_JOBCHAIN) ?
+ JS_COMMAND_HARD_STOP_1 :
+ JS_COMMAND_HARD_STOP_0;
}
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
@@ -745,6 +699,7 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
struct kbase_jd_atom *target_katom)
{
struct kbase_device *kbdev;
+ struct kbasep_js_device_data *js_devdata;
int js = target_katom->slot_nr;
int priority = target_katom->sched_priority;
int i;
@@ -752,6 +707,7 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
KBASE_DEBUG_ASSERT(kctx != NULL);
kbdev = kctx->kbdev;
KBASE_DEBUG_ASSERT(kbdev != NULL);
+ js_devdata = &kbdev->js_data;
lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
@@ -869,13 +825,6 @@ u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
{
u32 flush_id = 0;
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
- mutex_lock(&kbdev->pm.lock);
- if (kbdev->pm.backend.gpu_powered)
- flush_id = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(LATEST_FLUSH), NULL);
- mutex_unlock(&kbdev->pm.lock);
- }
return flush_id;
}
@@ -1083,7 +1032,7 @@ void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
* state when the soft/hard-stop action is complete
*/
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
- base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+ u16 core_reqs, struct kbase_jd_atom *target_katom)
{
u32 hw_action = action & JS_COMMAND_MASK;
@@ -1095,7 +1044,7 @@ void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
/* For soft-stop, don't enter if soft-stop not allowed, or isn't
* causing disjoint */
if (hw_action == JS_COMMAND_SOFT_STOP &&
- !(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+ !(kbasep_soft_stop_allowed(kbdev, core_reqs) &&
(action & JS_COMMAND_SW_CAUSES_DISJOINT)))
return;
@@ -1160,6 +1109,26 @@ static void kbase_debug_dump_registers(struct kbase_device *kbdev)
kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
}
+static void kbasep_save_hwcnt_setup(struct kbase_device *kbdev,
+ struct kbase_context *kctx,
+ struct kbase_uk_hwcnt_setup *hwcnt_setup)
+{
+ hwcnt_setup->dump_buffer =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) &
+ 0xffffffff;
+ hwcnt_setup->dump_buffer |= (u64)
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) <<
+ 32;
+ hwcnt_setup->jm_bm =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx);
+ hwcnt_setup->shader_bm =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), kctx);
+ hwcnt_setup->tiler_bm =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), kctx);
+ hwcnt_setup->mmu_l2_bm =
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), kctx);
+}
+
static void kbasep_reset_timeout_worker(struct work_struct *data)
{
unsigned long flags, mmu_flags;
@@ -1167,8 +1136,10 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
int i;
ktime_t end_timestamp = ktime_get();
struct kbasep_js_device_data *js_devdata;
+ struct kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
+ enum kbase_instr_state bckp_state;
bool try_schedule = false;
- bool silent = false;
+ bool restore_hwc = false;
KBASE_DEBUG_ASSERT(data);
@@ -1178,16 +1149,8 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
KBASE_DEBUG_ASSERT(kbdev);
js_devdata = &kbdev->js_data;
- if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
- KBASE_RESET_GPU_SILENT)
- silent = true;
-
KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
- /* Suspend vinstr.
- * This call will block until vinstr is suspended. */
- kbase_vinstr_suspend(kbdev->vinstr_ctx);
-
/* Make sure the timer has completed - this cannot be done from
* interrupt context, so this cannot be done within
* kbasep_try_reset_gpu_early. */
@@ -1237,14 +1200,39 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
* assume that anything that is still left on the GPU is stuck there and
* we'll kill it when we reset the GPU */
- if (!silent)
- dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+ dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
RESET_TIMEOUT);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+ if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) {
+ /* the same interrupt handler preempted itself */
+ /* GPU is being reset */
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ wait_event(kbdev->hwcnt.backend.wait,
+ kbdev->hwcnt.backend.triggered != 0);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ }
+ /* Save the HW counters setup */
+ if (kbdev->hwcnt.kctx != NULL) {
+ struct kbase_context *kctx = kbdev->hwcnt.kctx;
+
+ if (kctx->jctx.sched_info.ctx.is_scheduled) {
+ kbasep_save_hwcnt_setup(kbdev, kctx, &hwcnt_setup);
+
+ restore_hwc = true;
+ }
+ }
+
/* Output the state of some interesting registers to help in the
* debugging of GPU resets */
- if (!silent)
- kbase_debug_dump_registers(kbdev);
+ kbase_debug_dump_registers(kbdev);
+
+ bckp_state = kbdev->hwcnt.backend.state;
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_RESETTING;
+ kbdev->hwcnt.backend.triggered = 0;
+
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
/* Reset the GPU */
kbase_pm_init_hw(kbdev, 0);
@@ -1284,14 +1272,101 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
kbase_disjoint_state_down(kbdev);
wake_up(&kbdev->hwaccess.backend.reset_wait);
- if (!silent)
- dev_err(kbdev->dev, "Reset complete");
+ dev_err(kbdev->dev, "Reset complete");
if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending)
try_schedule = true;
mutex_unlock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ /* Restore the HW counters setup */
+ if (restore_hwc) {
+ struct kbase_context *kctx = kbdev->hwcnt.kctx;
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) |
+ PRFCNT_CONFIG_MODE_OFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+ hwcnt_setup.dump_buffer >> 32, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+ hwcnt_setup.jm_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+ hwcnt_setup.shader_bm, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
+ hwcnt_setup.mmu_l2_bm, kctx);
+
+ /* Due to PRLAM-8186 we need to disable the Tiler before we
+ * enable the HW counter dump. */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ 0, kctx);
+ else
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ hwcnt_setup.tiler_bm, kctx);
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) |
+ PRFCNT_CONFIG_MODE_MANUAL, kctx);
+
+ /* If HW has PRLAM-8186 we can now re-enable the tiler HW
+ * counters dump */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+ hwcnt_setup.tiler_bm, kctx);
+ }
+ kbdev->hwcnt.backend.state = bckp_state;
+ switch (kbdev->hwcnt.backend.state) {
+ /* Cases for waking kbasep_cache_clean_worker worker */
+ case KBASE_INSTR_STATE_CLEANED:
+ /* Cache-clean IRQ occurred, but we reset:
+		 * Wake up in case the waiter saw RESETTING */
+ case KBASE_INSTR_STATE_REQUEST_CLEAN:
+ /* After a clean was requested, but before the regs were
+ * written:
+		 * Wake up in case the waiter saw RESETTING */
+ wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
+ break;
+ case KBASE_INSTR_STATE_CLEANING:
+ /* Either:
+ * 1) We've not got the Cache-clean IRQ yet: it was lost, or:
+ * 2) We got it whilst resetting: it was voluntarily lost
+ *
+		 * So, move to the next state and wake up: */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED;
+ wake_up(&kbdev->hwcnt.backend.cache_clean_wait);
+ break;
+
+ /* Cases for waking anyone else */
+ case KBASE_INSTR_STATE_DUMPING:
+ /* If dumping, abort the dump, because we may've lost the IRQ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+ break;
+ case KBASE_INSTR_STATE_DISABLED:
+ case KBASE_INSTR_STATE_IDLE:
+ case KBASE_INSTR_STATE_FAULT:
+ /* Every other reason: wakeup in that state */
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+ break;
+
+ /* Unhandled cases */
+ case KBASE_INSTR_STATE_RESETTING:
+ default:
+ BUG();
+ break;
+ }
+ spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+ /* Resume the vinstr core */
+ kbase_vinstr_hwc_resume(kbdev->vinstr_ctx);
+
+ /* Note: counter dumping may now resume */
+
mutex_lock(&kbdev->pm.lock);
/* Find out what cores are required now */
@@ -1311,10 +1386,6 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
}
kbase_pm_context_idle(kbdev);
-
- /* Release vinstr */
- kbase_vinstr_resume(kbdev->vinstr_ctx);
-
KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
}
@@ -1396,7 +1467,7 @@ static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
*
* Return:
* The function returns a boolean which should be interpreted as follows:
- * true - Prepared for reset, kbase_reset_gpu_locked should be called.
+ * true - Prepared for reset, kbase_reset_gpu should be called.
* false - Another thread is performing a reset, kbase_reset_gpu should
* not be called.
*/
@@ -1490,29 +1561,4 @@ void kbase_reset_gpu_locked(struct kbase_device *kbdev)
/* Try resetting early */
kbasep_try_reset_gpu_early_locked(kbdev);
}
-
-void kbase_reset_gpu_silent(struct kbase_device *kbdev)
-{
- if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
- KBASE_RESET_GPU_NOT_PENDING,
- KBASE_RESET_GPU_SILENT) !=
- KBASE_RESET_GPU_NOT_PENDING) {
- /* Some other thread is already resetting the GPU */
- return;
- }
-
- kbase_disjoint_state_up(kbdev);
-
- queue_work(kbdev->hwaccess.backend.reset_workq,
- &kbdev->hwaccess.backend.reset_work);
-}
-
-bool kbase_reset_gpu_active(struct kbase_device *kbdev)
-{
- if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
- KBASE_RESET_GPU_NOT_PENDING)
- return false;
-
- return true;
-}
#endif /* KBASE_GPU_RESET_EN */
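/* A sketch of the reset entry points' intended calling sequence, per
 * the Return documentation above: only the caller that wins the
 * prepare step issues the reset. Helper name and log text are
 * illustrative assumptions. */
static void example_request_gpu_reset(struct kbase_device *kbdev)
{
	if (kbase_prepare_to_reset_gpu(kbdev)) {
		dev_err(kbdev->dev, "Preparing to reset GPU");
		kbase_reset_gpu(kbdev);
	}
	/* else: another thread already owns the reset */
}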
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
index 8f1e5615ea43..eb068d40283b 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -96,7 +96,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
int js,
u32 action,
- base_jd_core_req core_reqs,
+ u16 core_reqs,
struct kbase_jd_atom *target_katom);
/**
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
index da7c4df7d277..c0168c74f815 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,11 +24,11 @@
#include <mali_kbase_hwaccess_jm.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
-#include <mali_kbase_tlstream.h>
#include <mali_kbase_10969_workaround.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#include <backend/gpu/mali_kbase_js_affinity.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/* Return whether the specified ringbuffer is empty. HW access lock must be
@@ -592,7 +592,7 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
case KBASE_ATOM_GPU_RB_READY:
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
- case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_ENTRY:
+ case KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE:
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
@@ -603,9 +603,6 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
break;
- case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_EXIT:
- /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
-
case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
@@ -657,145 +654,53 @@ static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
return true;
}
-static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
+static bool kbase_gpu_in_secure_mode(struct kbase_device *kbdev)
{
- return kbdev->protected_mode;
+ return kbdev->secure_mode;
}
-static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+static int kbase_gpu_secure_mode_enable(struct kbase_device *kbdev)
{
int err = -EINVAL;
lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
- WARN_ONCE(!kbdev->protected_ops,
- "Cannot enter protected mode: protected callbacks not specified.\n");
+ WARN_ONCE(!kbdev->secure_ops,
+ "Cannot enable secure mode: secure callbacks not specified.\n");
- if (kbdev->protected_ops) {
- /* Switch GPU to protected mode */
- err = kbdev->protected_ops->protected_mode_enter(kbdev);
+ if (kbdev->secure_ops) {
+ /* Switch GPU to secure mode */
+ err = kbdev->secure_ops->secure_mode_enable(kbdev);
if (err)
- dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
- err);
+ dev_warn(kbdev->dev, "Failed to enable secure mode: %d\n", err);
else
- kbdev->protected_mode = true;
+ kbdev->secure_mode = true;
}
return err;
}
-static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
-
- WARN_ONCE(!kbdev->protected_ops,
- "Cannot exit protected mode: protected callbacks not specified.\n");
-
- if (!kbdev->protected_ops)
- return -EINVAL;
-
- kbdev->protected_mode_transition = true;
- kbase_reset_gpu_silent(kbdev);
-
- return 0;
-}
-
-static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
- struct kbase_jd_atom **katom, int idx, int js)
+static int kbase_gpu_secure_mode_disable(struct kbase_device *kbdev)
{
- int err = 0;
-
- switch (katom[idx]->exit_protected_state) {
- case KBASE_ATOM_EXIT_PROTECTED_CHECK:
- /*
- * If the atom ahead of this one hasn't got to being
- * submitted yet then bail.
- */
- if (idx == 1 &&
- (katom[0]->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED &&
- katom[0]->gpu_rb_state != KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
- return -EAGAIN;
-
- /* If we're not exiting protected mode then we're done here. */
- if (!(kbase_gpu_in_protected_mode(kbdev) &&
- !kbase_jd_katom_is_protected(katom[idx])))
- return 0;
-
- /*
- * If there is a transition in progress, or work still
- * on the GPU try again later.
- */
- if (kbdev->protected_mode_transition ||
- kbase_gpu_atoms_submitted_any(kbdev))
- return -EAGAIN;
-
- /*
- * Exiting protected mode requires a reset, but first the L2
- * needs to be powered down to ensure it's not active when the
- * reset is issued.
- */
- katom[idx]->exit_protected_state =
- KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
- if (kbase_pm_get_active_cores(kbdev, KBASE_PM_CORE_L2) ||
- kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
- /*
- * The L2 is still powered, wait for all the users to
- * finish with it before doing the actual reset.
- */
- return -EAGAIN;
- }
- katom[idx]->exit_protected_state =
- KBASE_ATOM_EXIT_PROTECTED_RESET;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_EXIT_PROTECTED_RESET:
- /* Issue the reset to the GPU */
- err = kbase_gpu_protected_mode_reset(kbdev);
- if (err) {
- /* Failed to exit protected mode, fail atom */
- katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
- kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
- /* Only return if head atom or previous atom
- * already removed - as atoms must be returned
- * in order */
- if (idx == 0 || katom[0]->gpu_rb_state ==
- KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
- kbase_gpu_dequeue_atom(kbdev, js, NULL);
- kbase_jm_return_atom_to_js(kbdev, katom[idx]);
- }
-
- kbase_vinstr_resume(kbdev->vinstr_ctx);
-
- return -EINVAL;
- }
+ int err = -EINVAL;
- katom[idx]->exit_protected_state =
- KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
+ lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+ WARN_ONCE(!kbdev->secure_ops,
+ "Cannot disable secure mode: secure callbacks not specified.\n");
- case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
- if (kbase_reset_gpu_active(kbdev))
- return -EAGAIN;
+ if (kbdev->secure_ops) {
+ /* Switch GPU to non-secure mode */
+ err = kbdev->secure_ops->secure_mode_disable(kbdev);
- /* protected mode sanity checks */
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) == kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
- KBASE_DEBUG_ASSERT_MSG(
- (kbase_jd_katom_is_protected(katom[idx]) && js == 0) ||
- !kbase_jd_katom_is_protected(katom[idx]),
- "Protected atom on JS%d not supported", js);
+ if (err)
+ dev_warn(kbdev->dev, "Failed to disable secure mode: %d\n", err);
+ else
+ kbdev->secure_mode = false;
}
- return 0;
+ return err;
}
void kbase_gpu_slot_update(struct kbase_device *kbdev)
@@ -814,7 +719,6 @@ void kbase_gpu_slot_update(struct kbase_device *kbdev)
for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
bool cores_ready;
- int ret;
if (!katom[idx])
continue;
@@ -831,48 +735,10 @@ void kbase_gpu_slot_update(struct kbase_device *kbdev)
break;
katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_EXIT;
+ KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_EXIT:
- /*
- * Exiting protected mode must be done before
- * the references on the cores are taken as
- * a power down the L2 is required which
- * can't happen after the references for this
- * atom are taken.
- */
- ret = kbase_jm_exit_protected_mode(kbdev,
- katom, idx, js);
- if (ret)
- break;
-
- katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
- if (katom[idx]->will_fail_event_code) {
- kbase_gpu_mark_atom_for_return(kbdev,
- katom[idx]);
- /* Set EVENT_DONE so this atom will be
- completed, not unpulled. */
- katom[idx]->event_code =
- BASE_JD_EVENT_DONE;
- /* Only return if head atom or previous
- * atom already removed - as atoms must
- * be returned in order. */
- if (idx == 0 || katom[0]->gpu_rb_state ==
- KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
- kbase_gpu_dequeue_atom(kbdev, js, NULL);
- kbase_jm_return_atom_to_js(kbdev, katom[idx]);
- }
- break;
- }
-
-
cores_ready =
kbasep_js_job_check_ref_cores(kbdev, js,
katom[idx]);
@@ -899,28 +765,12 @@ void kbase_gpu_slot_update(struct kbase_device *kbdev)
break;
katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_ENTRY;
+ KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_ENTRY:
-
- /* Only submit if head atom or previous atom
- * already submitted */
- if (idx == 1 &&
- (katom[0]->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED &&
- katom[0]->gpu_rb_state != KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
- break;
-
- /*
- * If the GPU is transitioning protected mode
- * then bail now and we'll be called when the
- * new state has settled.
- */
- if (kbdev->protected_mode_transition)
- break;
-
- if (!kbase_gpu_in_protected_mode(kbdev) && kbase_jd_katom_is_protected(katom[idx])) {
+ case KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE:
+ if (kbase_gpu_in_secure_mode(kbdev) != kbase_jd_katom_is_secure(katom[idx])) {
int err = 0;
/* Not in correct mode, take action */
@@ -934,26 +784,16 @@ void kbase_gpu_slot_update(struct kbase_device *kbdev)
*/
break;
}
- if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
- /*
- * We can't switch now because
- * the vinstr core state switch
- * is not done yet.
- */
- break;
- }
- /* Once reaching this point GPU must be
- * switched to protected mode or vinstr
- * re-enabled. */
/* No jobs running, so we can switch GPU mode right now */
- err = kbase_gpu_protected_mode_enter(kbdev);
+ if (kbase_jd_katom_is_secure(katom[idx])) {
+ err = kbase_gpu_secure_mode_enable(kbdev);
+ } else {
+ err = kbase_gpu_secure_mode_disable(kbdev);
+ }
+
if (err) {
- /*
- * Failed to switch into protected mode, resume
- * vinstr core and fail atom.
- */
- kbase_vinstr_resume(kbdev->vinstr_ctx);
+ /* Failed to switch secure mode, fail atom */
katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
/* Only return if head atom or previous atom
@@ -968,18 +808,22 @@ void kbase_gpu_slot_update(struct kbase_device *kbdev)
}
}
- /* Protected mode sanity checks */
+ /* Secure mode sanity checks */
KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) == kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
+ kbase_jd_katom_is_secure(katom[idx]) == kbase_gpu_in_secure_mode(kbdev),
+ "Secure mode of atom (%d) doesn't match secure mode of GPU (%d)",
+ kbase_jd_katom_is_secure(katom[idx]), kbase_gpu_in_secure_mode(kbdev));
+ KBASE_DEBUG_ASSERT_MSG(
+ (kbase_jd_katom_is_secure(katom[idx]) && js == 0) ||
+ !kbase_jd_katom_is_secure(katom[idx]),
+ "Secure atom on JS%d not supported", js);
+
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_READY;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
case KBASE_ATOM_GPU_RB_READY:
-
/* Only submit if head atom or previous atom
* already submitted */
if (idx == 1 &&
@@ -1100,16 +944,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
}
katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+
kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
- kbase_tlstream_tl_nret_atom_lpu(
- katom,
- &kbdev->gpu_props.props.raw_props.js_features[
- katom->slot_nr]);
- kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]);
- kbase_tlstream_tl_nret_ctx_lpu(
- kctx,
- &kbdev->gpu_props.props.raw_props.js_features[
- katom->slot_nr]);
if (completion_code == BASE_JD_EVENT_STOPPED) {
struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
@@ -1262,34 +1098,13 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
for (idx = 0; idx < 2; idx++) {
struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
js, 0);
- bool keep_in_jm_rb = false;
- if (!katom)
- continue;
-
- if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED)
- keep_in_jm_rb = true;
-
- kbase_gpu_release_atom(kbdev, katom, NULL);
-
- /*
- * If the atom wasn't on HW when the reset was issued
- * then leave it in the RB and next time we're kicked
- * it will be processed again from the starting state.
- */
- if (keep_in_jm_rb) {
- katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->exit_protected_state = KBASE_ATOM_EXIT_PROTECTED_CHECK;
- continue;
+ if (katom) {
+ kbase_gpu_release_atom(kbdev, katom, NULL);
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+ kbase_jm_complete(kbdev, katom, end_timestamp);
}
-
- /*
- * The atom was on the HW when the reset was issued
- * all we can do is fail the atom.
- */
- kbase_gpu_dequeue_atom(kbdev, js, NULL);
- katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
- kbase_jm_complete(kbdev, katom, end_timestamp);
}
}
}
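/* The secure_ops callbacks invoked in the hunks above are supplied by
 * platform code. A skeletal sketch, assuming the kbase_secure_ops
 * callback structure referenced by this patch; register programming is
 * platform-specific and left as comments: */
static int example_secure_mode_enable(struct kbase_device *kbdev)
{
	/* program the SoC's protection controller for secure jobs here */
	return 0;
}

static int example_secure_mode_disable(struct kbase_device *kbdev)
{
	/* return the GPU to the non-secure world here */
	return 0;
}

static struct kbase_secure_ops example_secure_ops = {
	.secure_mode_enable  = example_secure_mode_enable,
	.secure_mode_disable = example_secure_mode_disable,
};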
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c
index d665420ab380..6a49669af630 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,7 +23,6 @@
#include <mali_kbase.h>
#include "mali_kbase_js_affinity.h"
-#include "mali_kbase_hw.h"
#include <backend/gpu/mali_kbase_pm_internal.h>
@@ -115,14 +114,9 @@ bool kbase_js_choose_affinity(u64 * const affinity,
if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
BASE_JD_REQ_T) {
spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
- /* If the hardware supports XAFFINITY then we'll only enable
- * the tiler (which is the default so this is a no-op),
- * otherwise enable shader core 0. */
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
- *affinity = 1;
- else
- *affinity = 0;
-
+ /* Tiler only job, bit 0 needed to enable tiler but no shader
+ * cores required */
+ *affinity = 1;
return true;
}
@@ -178,12 +172,9 @@ bool kbase_js_choose_affinity(u64 * const affinity,
if (*affinity == 0)
return false;
- /* Enable core 0 if tiler required for hardware without XAFFINITY
- * support (notes above) */
- if (core_req & BASE_JD_REQ_T) {
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
- *affinity = *affinity | 1;
- }
+ /* Enable core 0 if tiler required */
+ if (core_req & BASE_JD_REQ_T)
+ *affinity = *affinity | 1;
return true;
}
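
Both hunks in this file collapse the XAFFINITY special-casing back into the older fixed rule: bit 0 of the affinity mask drives shader core 0 and is also what lets a tiler job run. A minimal sketch of the resulting policy, with the BASE_JD_REQ_* bits replaced by illustrative REQ_* stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    #define REQ_FS (1u << 0) /* illustrative stand-ins for BASE_JD_REQ_* */
    #define REQ_CS (1u << 1)
    #define REQ_T  (1u << 2)

    /* Tiler-only jobs get core 0 only; otherwise start from the
     * available-core mask and OR in core 0 whenever the tiler is
     * also requested. */
    static bool choose_affinity(uint64_t *affinity, uint32_t core_req,
                                uint64_t cores_available)
    {
        if ((core_req & (REQ_FS | REQ_CS | REQ_T)) == REQ_T) {
            *affinity = 1; /* bit 0 enables the tiler path */
            return true;
        }
        *affinity = cores_available;
        if (*affinity == 0)
            return false;
        if (core_req & REQ_T)
            *affinity |= 1;
        return true;
    }
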
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h
index fbffa3b40962..3026e6a58303 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,6 +24,14 @@
#ifndef _KBASE_JS_AFFINITY_H_
#define _KBASE_JS_AFFINITY_H_
+#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
+/* Import the external affinity mask variables */
+extern u64 mali_js0_affinity_mask;
+extern u64 mali_js1_affinity_mask;
+extern u64 mali_js2_affinity_mask;
+#endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
+
+
/**
* kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to
* submit a job to a particular job slot in the current status
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
index a23deb4ca20c..1e9a7e4c466d 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -138,17 +138,6 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
js_devdata->gpu_reset_ticks_ss;
}
- /* If timeouts have been changed then ensure
- * that atom tick count is not greater than the
- * new soft_stop timeout. This ensures that
- * atoms do not miss any of the timeouts due to
- * races between this worker and the thread
- * changing the timeouts. */
- if (backend->timeouts_updated &&
- ticks > soft_stop_ticks)
- ticks = atom->sched_info.cfs.ticks =
- soft_stop_ticks;
-
/* Job is Soft-Stoppable */
if (ticks == soft_stop_ticks) {
int disjoint_threshold =
@@ -268,8 +257,6 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
HRTIMER_MODE_REL);
- backend->timeouts_updated = false;
-
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
return HRTIMER_NORESTART;
@@ -348,10 +335,3 @@ void kbase_backend_timer_resume(struct kbase_device *kbdev)
kbase_backend_ctx_count_changed(kbdev);
}
-void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
-{
- struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
-
- backend->timeouts_updated = true;
-}
-
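
The first hunk removes the clamp that protected atoms against runtime timeout changes; with it gone, the timeouts_updated flag and kbase_backend_timeouts_changed() (removed in the last hunk) are no longer needed. The clamp itself is trivial; a sketch with illustrative names:

    #include <stdbool.h>

    /* When soft-stop timeouts are retuned at runtime, pull an atom's
     * accumulated tick count back so it cannot already be past the
     * new threshold and miss the soft-stop entirely. */
    static unsigned int clamp_ticks(bool timeouts_updated,
                                    unsigned int ticks,
                                    unsigned int soft_stop_ticks)
    {
        if (timeouts_updated && ticks > soft_stop_ticks)
            ticks = soft_stop_ticks;
        return ticks;
    }
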
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
index 4a3572d971a6..4fd13e2de63e 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,9 +20,11 @@
#include <mali_kbase.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_mmu_hw.h>
+#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
+#endif
+#include <backend/gpu/mali_kbase_mmu_hw_direct.h>
#include <backend/gpu/mali_kbase_device_internal.h>
-#include <mali_kbase_as_fault_debugfs.h>
static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
u32 num_pages)
@@ -152,9 +154,6 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
AS_FAULTADDRESS_LO),
kctx);
- /* report the fault to debugfs */
- kbase_as_fault_debugfs_new(kbdev, as_no);
-
/* record the fault status */
as->fault_status = kbase_reg_read(kbdev,
MMU_AS_REG(as_no,
@@ -166,15 +165,6 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
KBASE_MMU_FAULT_TYPE_BUS :
KBASE_MMU_FAULT_TYPE_PAGE;
-#ifdef CONFIG_MALI_GPU_MMU_AARCH64
- as->fault_extra_addr = kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
- kctx);
- as->fault_extra_addr <<= 32;
- as->fault_extra_addr |= kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
- kctx);
-#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
if (kbase_as_has_bus_fault(as)) {
/* Mark bus fault as handled.
@@ -213,36 +203,10 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_context *kctx)
{
struct kbase_mmu_setup *current_setup = &as->current_setup;
+#ifdef CONFIG_MALI_MIPE_ENABLED
u32 transcfg = 0;
+#endif
-#ifdef CONFIG_MALI_GPU_MMU_AARCH64
- transcfg = current_setup->transcfg & 0xFFFFFFFFUL;
-
- /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
- /* Clear PTW_MEMATTR bits */
- transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
- /* Enable correct PTW_MEMATTR bits */
- transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
-
- if (kbdev->system_coherency == COHERENCY_ACE) {
- /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
- /* Clear PTW_SH bits */
- transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
- /* Enable correct PTW_SH bits */
- transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
- }
-
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
- transcfg, kctx);
- kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
- (current_setup->transcfg >> 32) & 0xFFFFFFFFUL, kctx);
-
-#else /* CONFIG_MALI_GPU_MMU_AARCH64 */
-
- if (kbdev->system_coherency == COHERENCY_ACE)
- current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
-
-#endif /* CONFIG_MALI_GPU_MMU_AARCH64 */
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
current_setup->transtab & 0xFFFFFFFFUL, kctx);
@@ -254,10 +218,12 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
(current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
+#if defined(CONFIG_MALI_MIPE_ENABLED)
kbase_tlstream_tl_attrib_as_config(as,
current_setup->transtab,
current_setup->memattr,
transcfg);
+#endif
write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
}
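
Several spots in this file, including the removed AS_FAULTEXTRA read, use the same HI/LO access pattern: a 64-bit MMU register is exposed as two 32-bit halves that are read (or written) separately and stitched together. A sketch of the read side, with read32() standing in for kbase_reg_read():

    #include <stdint.h>

    /* Compose a 64-bit register value from its 32-bit halves, reading
     * HI first and then LO, mirroring the driver's sequence. */
    static uint64_t read64(uint32_t (*read32)(uint32_t reg),
                           uint32_t reg_lo, uint32_t reg_hi)
    {
        uint64_t val = read32(reg_hi);

        val <<= 32;
        val |= read32(reg_lo);
        return val;
    }
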
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
index 711e44c7f80a..947a7ed285d6 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -168,7 +168,6 @@ bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
/* Force all cores off */
kbdev->pm.backend.desired_shader_state = 0;
- kbdev->pm.backend.desired_tiler_state = 0;
/* Force all cores to be unavailable, in the situation where
* transitions are in progress for some cores but not others,
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
index f891fa225a89..487391168e25 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,8 +35,7 @@ static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
{
if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
- kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt)
+ kbdev->shader_inuse_bitmap))
return false;
return true;
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
index e8f96fe6c514..60e40915869c 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -192,14 +192,12 @@ union kbase_pm_ca_policy_data {
* @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is
* powered off
* @shader_poweroff_pending_time: number of poweroff timer ticks until shaders
- * and/or timers are powered off
+ * are powered off
* @gpu_poweroff_timer: Timer for powering off GPU
* @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires
* @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq
* @shader_poweroff_pending: Bit mask of shaders to be powered off on next
* timer callback
- * @tiler_poweroff_pending: Bit mask of tilers to be powered off on next timer
- * callback
* @poweroff_timer_needed: true if the poweroff timer is currently required,
* false otherwise
* @poweroff_timer_running: true if the poweroff timer is currently running,
@@ -221,6 +219,9 @@ union kbase_pm_ca_policy_data {
* &struct kbase_pm_callback_conf
* @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
* &struct kbase_pm_callback_conf
+ * @callback_cci_snoop_ctrl: Callback when the GPU L2 power may transition.
+ * If enable is set then snoops should be enabled,
+ * otherwise snoops should be disabled.
*
* Note:
* During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the
@@ -276,7 +277,6 @@ struct kbase_pm_backend_data {
struct work_struct gpu_poweroff_work;
u64 shader_poweroff_pending;
- u64 tiler_poweroff_pending;
bool poweroff_timer_needed;
bool poweroff_timer_running;
@@ -288,6 +288,7 @@ struct kbase_pm_backend_data {
int (*callback_power_runtime_on)(struct kbase_device *kbdev);
void (*callback_power_runtime_off)(struct kbase_device *kbdev);
int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+
};
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
index 81322fd0dd17..9dac2303bd00 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,8 +37,7 @@ static u64 demand_get_core_mask(struct kbase_device *kbdev)
static bool demand_get_core_active(struct kbase_device *kbdev)
{
if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
- kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt)
+ kbdev->shader_inuse_bitmap))
return false;
return true;
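
This is the same change as in the coarse-demand policy above: with the tiler counts dropped from the test, both demand policies now consider the GPU idle purely from the context count and the shader bitmaps. The resulting predicate, sketched with illustrative field names rather than the real struct kbase_device layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct pm_state {
        int active_count;
        uint64_t shader_needed_bitmap;
        uint64_t shader_inuse_bitmap;
    };

    /* The GPU is idle only when no context is active and no shader
     * core is needed or in use. */
    static bool get_core_active(const struct pm_state *pm)
    {
        return pm->active_count != 0 ||
               (pm->shader_needed_bitmap | pm->shader_inuse_bitmap) != 0;
    }
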
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
index 03ba23d54365..5c1388448d28 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,8 +30,11 @@
#if defined(CONFIG_MALI_GATOR_SUPPORT)
#include <mali_kbase_gator.h>
#endif
+#if defined(CONFIG_MALI_MIPE_ENABLED)
#include <mali_kbase_tlstream.h>
+#endif
#include <mali_kbase_pm.h>
+#include <mali_kbase_cache_policy.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_smc.h>
#include <mali_kbase_hwaccess_jm.h>
@@ -119,39 +122,6 @@ static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
return (u32)core_type + (u32)action;
}
-#ifdef CONFIG_ARM64
-static void mali_cci_flush_l2(struct kbase_device *kbdev)
-{
- const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
- u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
- u32 raw;
-
- /*
- * Note that we don't take the cache flush mutex here since
- * we expect to be the last user of the L2: all other L2 users
- * must have dropped their references to initiate L2 power
- * down, and L2 power down is the only valid place for this
- * to be called from.
- */
-
- kbase_reg_write(kbdev,
- GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES,
- NULL);
-
- raw = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
- NULL);
-
- /* Wait for cache flush to complete before continuing, exit on
- * gpu resets or loop expiry. */
- while (((raw & mask) == 0) && --loops) {
- raw = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
- NULL);
- }
-}
-#endif
/**
* kbase_pm_invoke - Invokes an action on a core set
@@ -206,7 +176,7 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
kbase_trace_mali_pm_power_off(core_type, cores);
}
#endif
-
+#if defined(CONFIG_MALI_MIPE_ENABLED)
if (cores) {
u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
@@ -216,7 +186,7 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
state &= ~cores;
kbase_tlstream_aux_pm_state(core_type, state);
}
-
+#endif
/* Tracing */
if (cores) {
if (action == ACTION_PWRON)
@@ -249,8 +219,6 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
case KBASE_PM_CORE_L2:
KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
0u, lo);
- /* disable snoops before L2 is turned off */
- kbase_pm_cache_snoop_disable(kbdev);
break;
default:
break;
@@ -478,12 +446,6 @@ static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
/* All are ready, none will be turned off, and none are
* transitioning */
kbdev->pm.backend.l2_powered = 1;
- /*
- * Ensure snoops are enabled after L2 is powered up,
- * note that kbase keeps track of the snoop state, so
- * safe to repeatedly call.
- */
- kbase_pm_cache_snoop_enable(kbdev);
if (kbdev->l2_users_count > 0) {
/* Notify any registered l2 cache users
* (optimized out when no users waiting) */
@@ -551,12 +513,10 @@ KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type);
* @present: The bit mask of present caches
* @cores_powered: A bit mask of cores (or L2 caches) that are desired to
* be powered
- * @tilers_powered: The bit mask of tilers that are desired to be powered
*
* Return: A bit mask of the caches that should be turned on
*/
-static u64 get_desired_cache_status(u64 present, u64 cores_powered,
- u64 tilers_powered)
+static u64 get_desired_cache_status(u64 present, u64 cores_powered)
{
u64 desired = 0;
@@ -579,10 +539,6 @@ static u64 get_desired_cache_status(u64 present, u64 cores_powered,
present &= ~bit_mask;
}
- /* Power up the required L2(s) for the tiler */
- if (tilers_powered)
- desired |= 1;
-
return desired;
}
@@ -595,7 +551,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
bool in_desired_state = true;
u64 desired_l2_state;
u64 cores_powered;
- u64 tilers_powered;
u64 tiler_available_bitmap;
u64 shader_available_bitmap;
u64 shader_ready_bitmap;
@@ -629,10 +584,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
cores_powered |= kbdev->pm.backend.desired_shader_state;
- /* Work out which tilers want to be powered */
- tilers_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER);
- tilers_powered |= kbdev->pm.backend.desired_tiler_state;
-
/* If there are l2 cache users registered, keep all l2s powered even if
* all other cores are off. */
if (kbdev->l2_users_count > 0)
@@ -640,11 +591,17 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
desired_l2_state = get_desired_cache_status(
kbdev->gpu_props.props.raw_props.l2_present,
- cores_powered, tilers_powered);
+ cores_powered);
/* If any l2 cache is on, then enable l2 #0, for use by job manager */
- if (0 != desired_l2_state)
+ if (0 != desired_l2_state) {
desired_l2_state |= 1;
+ /* Also enable the tiler if any L2 cache is powered */
+ kbdev->pm.backend.desired_tiler_state =
+ kbdev->gpu_props.props.raw_props.tiler_present;
+ } else {
+ kbdev->pm.backend.desired_tiler_state = 0;
+ }
prev_l2_available_bitmap = kbdev->l2_available_bitmap;
in_desired_state &= kbase_pm_transition_core_type(kbdev,
@@ -750,7 +707,7 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
kbase_pm_get_ready_cores(kbdev,
KBASE_PM_CORE_TILER));
#endif
-
+#if defined(CONFIG_MALI_MIPE_ENABLED)
kbase_tlstream_aux_pm_state(
KBASE_PM_CORE_L2,
kbase_pm_get_ready_cores(
@@ -764,6 +721,7 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
kbase_pm_get_ready_cores(
kbdev,
KBASE_PM_CORE_TILER));
+#endif
KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
kbdev->pm.backend.gpu_in_desired_state,
@@ -1060,7 +1018,6 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
return false;
}
- kbase_pm_cache_snoop_disable(kbdev);
/* The GPU power may be turned off from this point */
kbdev->pm.backend.gpu_powered = false;
@@ -1143,20 +1100,18 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
/* Enable alternative hardware counter selection if configured. */
- if (!GPU_ID_IS_NEW_FORMAT(prod_id))
+ if (DEFAULT_ALTERNATIVE_HWC)
kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
-#endif
/* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
- if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+ if (prod_id < 0x760 || prod_id == 0x6956) /* T60x, T62x, T72x */
kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
- else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
+ else if (prod_id >= 0x760 && prod_id <= 0x880) /* T76x, T8xx */
kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
}
@@ -1181,12 +1136,6 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
- if (kbdev->system_coherency == COHERENCY_ACE) {
- /* Allow memory configuration disparity to be ignored, we
- * optimize the use of shared memory and thus we expect
- * some disparity in the memory configuration */
- kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
- }
/* Only for T86x/T88x-based products after r2p0 */
if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
@@ -1251,42 +1200,51 @@ static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
}
-void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
-{
- if ((kbdev->system_coherency == COHERENCY_ACE) &&
- !kbdev->cci_snoop_enabled) {
-#ifdef CONFIG_ARM64
- if (kbdev->snoop_enable_smc != 0)
- kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
-#endif /* CONFIG_ARM64 */
- dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
- kbdev->cci_snoop_enabled = true;
- }
-}
-void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
{
- if ((kbdev->system_coherency == COHERENCY_ACE) &&
- kbdev->cci_snoop_enabled) {
-#ifdef CONFIG_ARM64
- if (kbdev->snoop_disable_smc != 0) {
- mali_cci_flush_l2(kbdev);
- kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
- }
-#endif /* CONFIG_ARM64 */
- dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
- kbdev->cci_snoop_enabled = false;
+ unsigned long irq_flags;
+ struct kbasep_reset_timeout_data rtdata;
+
+ KBASE_DEBUG_ASSERT(NULL != kbdev);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ /* Ensure the clock is on before attempting to access the hardware */
+ if (!kbdev->pm.backend.gpu_powered) {
+ if (kbdev->pm.backend.callback_power_on)
+ kbdev->pm.backend.callback_power_on(kbdev);
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
+ kbdev->pm.backend.gpu_powered = true;
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ irq_flags);
}
-}
-static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
-{
- struct kbasep_reset_timeout_data rtdata;
+ /* Ensure interrupts are off to begin with; this also clears any
+ * outstanding interrupts */
+ kbase_pm_disable_interrupts(kbdev);
+ /* Prepare for the soft-reset */
+ kbdev->pm.backend.reset_done = false;
- KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+ /* The cores should be made unavailable due to the reset */
+ spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
+ if (kbdev->shader_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+ NULL, 0u, (u32)0u);
+ if (kbdev->tiler_available_bitmap != 0u)
+ KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+ NULL, NULL, 0u, (u32)0u);
+ kbdev->shader_available_bitmap = 0u;
+ kbdev->tiler_available_bitmap = 0u;
+ kbdev->l2_available_bitmap = 0u;
+ spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);
+ /* Soft reset the GPU */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+#if defined(CONFIG_MALI_MIPE_ENABLED)
kbase_tlstream_jd_gpu_soft_reset(kbdev);
-
+#endif
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
GPU_COMMAND_SOFT_RESET, NULL);
@@ -1312,7 +1270,7 @@ static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
/* GPU has been reset */
hrtimer_cancel(&rtdata.timer);
destroy_hrtimer_on_stack(&rtdata.timer);
- return 0;
+ goto out;
}
/* No interrupt has been received - check if the RAWSTAT register says
@@ -1348,7 +1306,7 @@ static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
/* GPU has been reset */
hrtimer_cancel(&rtdata.timer);
destroy_hrtimer_on_stack(&rtdata.timer);
- return 0;
+ goto out;
}
destroy_hrtimer_on_stack(&rtdata.timer);
@@ -1356,90 +1314,16 @@ static int kbase_pm_reset_do_normal(struct kbase_device *kbdev)
dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
RESET_TIMEOUT);
+ /* The GPU still hasn't reset; give up */
return -EINVAL;
-}
-
-static int kbase_pm_reset_do_protected(struct kbase_device *kbdev)
-{
- KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
- kbase_tlstream_jd_gpu_soft_reset(kbdev);
-
- return kbdev->protected_ops->protected_mode_reset(kbdev);
-}
-
-int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
-{
- unsigned long irq_flags;
- int err;
- bool resume_vinstr = false;
-
- KBASE_DEBUG_ASSERT(NULL != kbdev);
- lockdep_assert_held(&kbdev->pm.lock);
-
- /* Ensure the clock is on before attempting to access the hardware */
- if (!kbdev->pm.backend.gpu_powered) {
- if (kbdev->pm.backend.callback_power_on)
- kbdev->pm.backend.callback_power_on(kbdev);
-
- spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
- irq_flags);
- kbdev->pm.backend.gpu_powered = true;
- spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
- irq_flags);
- }
-
- /* Ensure interrupts are off to begin with; this also clears any
- * outstanding interrupts */
- kbase_pm_disable_interrupts(kbdev);
- /* Ensure cache snoops are disabled before reset. */
- kbase_pm_cache_snoop_disable(kbdev);
- /* Prepare for the soft-reset */
- kbdev->pm.backend.reset_done = false;
-
- /* The cores should be made unavailable due to the reset */
- spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
- if (kbdev->shader_available_bitmap != 0u)
- KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
- NULL, 0u, (u32)0u);
- if (kbdev->tiler_available_bitmap != 0u)
- KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
- NULL, NULL, 0u, (u32)0u);
- kbdev->shader_available_bitmap = 0u;
- kbdev->tiler_available_bitmap = 0u;
- kbdev->l2_available_bitmap = 0u;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);
- /* Soft reset the GPU */
- if (kbdev->protected_mode_support &&
- kbdev->protected_ops->protected_mode_reset)
- err = kbase_pm_reset_do_protected(kbdev);
- else
- err = kbase_pm_reset_do_normal(kbdev);
-
- spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, irq_flags);
- if (kbdev->protected_mode)
- resume_vinstr = true;
- kbdev->protected_mode_transition = false;
- kbdev->protected_mode = false;
- spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, irq_flags);
-
- if (err)
- goto exit;
+out:
if (flags & PM_HW_ISSUES_DETECT)
kbase_pm_hw_issues_detect(kbdev);
kbase_pm_hw_issues_apply(kbdev);
- kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
-
- /* Sanity check protected mode was left after reset */
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
- u32 gpu_status = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS), NULL);
-
- WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
- }
/* If cycle counter was in use re-enable it, enable_irqs will only be
* false when called from kbase_pm_powerup */
@@ -1467,12 +1351,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
if (flags & PM_ENABLE_IRQS)
kbase_pm_enable_interrupts(kbdev);
-exit:
- /* If GPU is leaving protected mode resume vinstr operation. */
- if (kbdev->vinstr_ctx && resume_vinstr)
- kbase_vinstr_resume(kbdev->vinstr_ctx);
-
- return err;
+ return 0;
}
/**
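
The get_desired_cache_status() change above makes the tiler's L2 needs implicit: the tiler state is now derived from the L2 state rather than feeding into it. The core-to-cache mapping itself keeps an L2 slice powered whenever any core it serves is wanted. A sketch assuming a hypothetical fixed layout of four cores per L2 slice, instead of the real core-group properties:

    #include <stdint.h>

    static uint64_t desired_cache_status(uint64_t l2_present,
                                         uint64_t cores_powered)
    {
        uint64_t desired = 0;
        unsigned int i;

        for (i = 0; i < 16; i++) {
            /* Hypothetical layout: L2 slice i serves cores 4i..4i+3. */
            uint64_t core_group = 0xFull << (4 * i);

            if ((l2_present & (1ull << i)) &&
                (cores_powered & core_group))
                desired |= 1ull << i;
        }
        /* If any L2 is on, L2 #0 must be on too, for the job manager. */
        if (desired)
            desired |= 1;
        return desired;
    }
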
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
index aa51b8cdef8f..943eda567cb5 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -501,23 +501,5 @@ void kbase_pm_power_changed(struct kbase_device *kbdev);
void kbase_pm_metrics_update(struct kbase_device *kbdev,
ktime_t *now);
-/**
- * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
- * If the GPU does not have coherency this is a no-op
- * @kbdev: Device pointer
- *
- * This function should be called after L2 power up.
- */
-
-void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
-
-/**
- * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
- * If the GPU does not have coherency this is a no-op
- * @kbdev: Device pointer
- *
- * This function should be called before L2 power off.
- */
-void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
index 4d006028089a..343436fc353d 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,6 +21,7 @@
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
@@ -154,22 +155,16 @@ static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
{
u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
- u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
lockdep_assert_held(&kbdev->pm.power_change_lock);
kbdev->pm.backend.desired_shader_state &=
~kbdev->pm.backend.shader_poweroff_pending;
- kbdev->pm.backend.desired_tiler_state &=
- ~kbdev->pm.backend.tiler_poweroff_pending;
kbdev->pm.backend.shader_poweroff_pending = 0;
- kbdev->pm.backend.tiler_poweroff_pending = 0;
- if (prev_shader_state != kbdev->pm.backend.desired_shader_state ||
- prev_tiler_state !=
- kbdev->pm.backend.desired_tiler_state ||
- kbdev->pm.backend.ca_in_transition) {
+ if (prev_shader_state != kbdev->pm.backend.desired_shader_state
+ || kbdev->pm.backend.ca_in_transition) {
bool cores_are_available;
KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
@@ -207,8 +202,7 @@ kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
queue_work(kbdev->pm.backend.gpu_poweroff_wq,
&kbdev->pm.backend.gpu_poweroff_work);
- if (kbdev->pm.backend.shader_poweroff_pending ||
- kbdev->pm.backend.tiler_poweroff_pending) {
+ if (kbdev->pm.backend.shader_poweroff_pending) {
kbdev->pm.backend.shader_poweroff_pending_time--;
KBASE_DEBUG_ASSERT(
@@ -333,7 +327,6 @@ void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
kbdev->pm.backend.gpu_poweroff_pending = 0;
kbdev->pm.backend.shader_poweroff_pending = 0;
- kbdev->pm.backend.tiler_poweroff_pending = 0;
kbdev->pm.backend.shader_poweroff_pending_time = 0;
spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
@@ -388,10 +381,8 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
* when there are contexts active */
KBASE_DEBUG_ASSERT(pm->active_count == 0);
- if (backend->shader_poweroff_pending ||
- backend->tiler_poweroff_pending) {
+ if (backend->shader_poweroff_pending) {
backend->shader_poweroff_pending = 0;
- backend->tiler_poweroff_pending = 0;
backend->shader_poweroff_pending_time = 0;
}
@@ -450,7 +441,6 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
u64 desired_bitmap;
- u64 desired_tiler_bitmap;
bool cores_are_available;
bool do_poweroff = false;
@@ -463,37 +453,23 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+ /* Enable core 0 if tiler required, regardless of core availability */
if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
- desired_tiler_bitmap = 1;
- else
- desired_tiler_bitmap = 0;
-
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
- /* Unless XAFFINITY is supported, enable core 0 if tiler
- * required, regardless of core availability */
- if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
- desired_bitmap |= 1;
- }
+ desired_bitmap |= 1;
if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
(u32)desired_bitmap);
/* Are any cores being powered on? */
if (~kbdev->pm.backend.desired_shader_state & desired_bitmap ||
- ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap ||
kbdev->pm.backend.ca_in_transition) {
/* Check if we are powering off any cores before updating shader
* state */
- if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
- kbdev->pm.backend.desired_tiler_state &
- ~desired_tiler_bitmap) {
+ if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap) {
/* Start timer to power off cores */
kbdev->pm.backend.shader_poweroff_pending |=
(kbdev->pm.backend.desired_shader_state &
~desired_bitmap);
- kbdev->pm.backend.tiler_poweroff_pending |=
- (kbdev->pm.backend.desired_tiler_state &
- ~desired_tiler_bitmap);
if (kbdev->pm.poweroff_shader_ticks)
kbdev->pm.backend.shader_poweroff_pending_time =
@@ -503,28 +479,21 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
}
kbdev->pm.backend.desired_shader_state = desired_bitmap;
- kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap;
/* If any cores are being powered on, transition immediately */
cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
- } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
- kbdev->pm.backend.desired_tiler_state &
- ~desired_tiler_bitmap) {
+ } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap) {
/* Start timer to power off cores */
kbdev->pm.backend.shader_poweroff_pending |=
(kbdev->pm.backend.desired_shader_state &
~desired_bitmap);
- kbdev->pm.backend.tiler_poweroff_pending |=
- (kbdev->pm.backend.desired_tiler_state &
- ~desired_tiler_bitmap);
if (kbdev->pm.poweroff_shader_ticks)
kbdev->pm.backend.shader_poweroff_pending_time =
kbdev->pm.poweroff_shader_ticks;
else
kbasep_pm_do_poweroff_cores(kbdev);
} else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 &&
- desired_tiler_bitmap != 0 &&
- kbdev->pm.backend.poweroff_timer_needed) {
+ kbdev->pm.backend.poweroff_timer_needed) {
/* If power policy is keeping cores on despite there being no
* active contexts then disable poweroff timer as it isn't
* required.
@@ -535,17 +504,11 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
/* Ensure timer does not power off wanted cores and make sure to power
* off unwanted cores */
- if (kbdev->pm.backend.shader_poweroff_pending ||
- kbdev->pm.backend.tiler_poweroff_pending) {
+ if (kbdev->pm.backend.shader_poweroff_pending != 0) {
kbdev->pm.backend.shader_poweroff_pending &=
~(kbdev->pm.backend.desired_shader_state &
desired_bitmap);
- kbdev->pm.backend.tiler_poweroff_pending &=
- ~(kbdev->pm.backend.desired_tiler_state &
- desired_tiler_bitmap);
-
- if (!kbdev->pm.backend.shader_poweroff_pending &&
- !kbdev->pm.backend.tiler_poweroff_pending)
+ if (kbdev->pm.backend.shader_poweroff_pending == 0)
kbdev->pm.backend.shader_poweroff_pending_time = 0;
}
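
The pm_policy.c hunks strip the tiler_poweroff_pending bookkeeping, leaving only the shader side of the deferred power-off. The tick-driven mechanism that remains can be sketched as follows; the struct and names are illustrative, not the driver's:

    #include <stdint.h>

    struct poweroff_state {
        uint64_t desired_shader_state;
        uint64_t shader_poweroff_pending;
        int shader_poweroff_pending_time;
    };

    /* Called once per poweroff-timer tick: count down while shaders
     * are pending, and on expiry drop them from the desired state so
     * the next transition check powers them off. */
    static void poweroff_tick(struct poweroff_state *s)
    {
        if (!s->shader_poweroff_pending)
            return;
        if (--s->shader_poweroff_pending_time > 0)
            return;
        s->desired_shader_state &= ~s->shader_poweroff_pending;
        s->shader_poweroff_pending = 0;
    }
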
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.c
index d965033905ca..9d3eb10bd3c9 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -36,12 +36,7 @@ static struct thermal_zone_device *gpu_tz;
static unsigned long model_static_power(unsigned long voltage)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
- unsigned long temperature;
-#else
- int temperature;
-#endif
- unsigned long temp;
+ int temperature, temp;
unsigned long temp_squared, temp_cubed, temp_scaling_factor;
const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
@@ -90,11 +85,7 @@ static unsigned long model_dynamic_power(unsigned long freq,
return (dynamic_coefficient * v2 * f_mhz) / 1000000; /* mW */
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
-struct devfreq_cooling_ops power_model_simple_ops = {
-#else
struct devfreq_cooling_power power_model_simple_ops = {
-#endif
.get_static_power = model_static_power,
.get_dynamic_power = model_dynamic_power,
};
@@ -159,7 +150,7 @@ int kbase_power_model_simple_init(struct kbase_device *kbdev)
dynamic_coefficient = (((dynamic_power * 1000) / voltage_squared)
* 1000) / frequency;
- if (of_property_read_u32_array(power_model_node, "ts", (u32 *)ts, 4)) {
+ if (of_property_read_u32_array(power_model_node, "ts", ts, 4)) {
dev_err(kbdev->dev, "ts in power_model not available\n");
return -EINVAL;
}
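
The dynamic term of this simple model is the standard P_dyn = C * V^2 * f. A standalone sketch of the computation with the same fixed-point scaling as model_dynamic_power() above (voltage in mV, frequency in Hz, result in mW); any concrete values fed to it would come from the device tree, not from this example:

    #include <stdint.h>

    static unsigned long dynamic_power_mw(unsigned long dyn_coeff,
                                          unsigned long voltage_mv,
                                          unsigned long freq_hz)
    {
        /* v2 is mV^2 scaled down by 1000 and f_mhz is MHz, matching
         * the pre-scaled dynamic coefficient computed at init time. */
        const unsigned long v2 = (voltage_mv * voltage_mv) / 1000;
        const unsigned long f_mhz = freq_hz / 1000000;

        return (dyn_coeff * v2 * f_mhz) / 1000000;
    }
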
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.h
index 9b5e69a9323b..17eede4d917c 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_power_model_simple.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -38,10 +38,6 @@
*/
int kbase_power_model_simple_init(struct kbase_device *kbdev);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
-extern struct devfreq_cooling_ops power_model_simple_ops;
-#else
extern struct devfreq_cooling_power power_model_simple_ops;
-#endif
#endif /* _BASE_POWER_MODEL_SIMPLE_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
index d992989123e8..4bcde85f3ee1 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -74,10 +74,9 @@ void kbase_wait_write_flush(struct kbase_context *kctx)
{
u32 base_count = 0;
- /*
- * The caller must be holding onto the kctx or the call is from
- * userspace.
- */
+ /* A suspend won't happen here, because we're in a syscall from a
+ * userspace thread */
+
kbase_pm_context_active(kctx->kbdev);
kbase_pm_request_gpu_cycle_counter(kctx->kbdev);